  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
  7. #include <linux/atomic.h>
  8. #include <linux/cpu_pm.h>
  9. #include <linux/delay.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/io.h>
  12. #include <linux/iopoll.h>
  13. #include <linux/kernel.h>
  14. #include <linux/ktime.h>
  15. #include <linux/list.h>
  16. #include <linux/module.h>
  17. #include <linux/notifier.h>
  18. #include <linux/of.h>
  19. #include <linux/of_irq.h>
  20. #include <linux/of_platform.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/pm_domain.h>
  23. #include <linux/pm_runtime.h>
  24. #include <linux/slab.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/wait.h>
  27. #include <clocksource/arm_arch_timer.h>
  28. #include <soc/qcom/cmd-db.h>
  29. #include <soc/qcom/tcs.h>
  30. #include <dt-bindings/soc/qcom,rpmh-rsc.h>
  31. #include "rpmh-internal.h"
  32. #define CREATE_TRACE_POINTS
  33. #include "trace-rpmh.h"
  34. #define RSC_DRV_ID 0
  35. #define MAJOR_VER_MASK 0xFF
  36. #define MAJOR_VER_SHIFT 16
  37. #define MINOR_VER_MASK 0xFF
  38. #define MINOR_VER_SHIFT 8
/*
 * Indexes into the version-specific register-offset tables
 * (rpmh_rsc_reg_offset_ver_*) defined below.
 */
enum {
	RSC_DRV_TCS_OFFSET,
	RSC_DRV_CMD_OFFSET,
	DRV_SOLVER_CONFIG,
	DRV_PRNT_CHLD_CONFIG,
	RSC_DRV_IRQ_ENABLE,
	RSC_DRV_IRQ_STATUS,
	RSC_DRV_IRQ_CLEAR,
	RSC_DRV_CMD_WAIT_FOR_CMPL,
	RSC_DRV_CONTROL,
	RSC_DRV_STATUS,
	RSC_DRV_CMD_ENABLE,
	RSC_DRV_CMD_MSGID,
	RSC_DRV_CMD_ADDR,
	RSC_DRV_CMD_DATA,
	RSC_DRV_CMD_STATUS,
	RSC_DRV_CMD_RESP_DATA,
};
  57. /* DRV HW Solver Configuration Information Register */
  58. #define DRV_HW_SOLVER_MASK 1
  59. #define DRV_HW_SOLVER_SHIFT 24
  60. /* DRV TCS Configuration Information Register */
  61. #define DRV_NUM_TCS_MASK 0x3F
  62. #define DRV_NUM_TCS_SHIFT 6
  63. #define DRV_NCPT_MASK 0x1F
  64. #define DRV_NCPT_SHIFT 27
  65. /* Offsets for CONTROL TCS Registers */
  66. #define RSC_DRV_CTL_TCS_DATA_HI 0x38
  67. #define RSC_DRV_CTL_TCS_DATA_HI_MASK 0xFFFFFF
  68. #define RSC_DRV_CTL_TCS_DATA_HI_VALID BIT(31)
  69. #define RSC_DRV_CTL_TCS_DATA_LO 0x40
  70. #define RSC_DRV_CTL_TCS_DATA_LO_MASK 0xFFFFFFFF
  71. #define RSC_DRV_CTL_TCS_DATA_SIZE 32
  72. #define TCS_AMC_MODE_ENABLE BIT(16)
  73. #define TCS_AMC_MODE_TRIGGER BIT(24)
  74. /* TCS CMD register bit mask */
  75. #define CMD_MSGID_LEN 8
  76. #define CMD_MSGID_RESP_REQ BIT(8)
  77. #define CMD_MSGID_WRITE BIT(16)
  78. #define CMD_STATUS_ISSUED BIT(8)
  79. #define CMD_STATUS_COMPL BIT(16)
  80. /*
  81. * Here's a high level overview of how all the registers in RPMH work
  82. * together:
  83. *
  84. * - The main rpmh-rsc address is the base of a register space that can
  85. * be used to find overall configuration of the hardware
  86. * (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
  87. * space are all the TCS blocks. The offset of the TCS blocks is
  88. * specified in the device tree by "qcom,tcs-offset" and used to
  89. * compute tcs_base.
  90. * - TCS blocks come one after another. Type, count, and order are
  91. * specified by the device tree as "qcom,tcs-config".
  92. * - Each TCS block has some registers, then space for up to 16 commands.
  93. * Note that though address space is reserved for 16 commands, fewer
  94. * might be present. See ncpt (num cmds per TCS).
  95. *
  96. * Here's a picture:
  97. *
  98. * +---------------------------------------------------+
  99. * |RSC |
  100. * | ctrl |
  101. * | |
  102. * | Drvs: |
  103. * | +-----------------------------------------------+ |
  104. * | |DRV0 | |
  105. * | | ctrl/config | |
  106. * | | IRQ | |
  107. * | | | |
  108. * | | TCSes: | |
  109. * | | +------------------------------------------+ | |
  110. * | | |TCS0 | | | | | | | | | | | | | | |
  111. * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
  112. * | | | | | | | | | | | | | | | | | |
  113. * | | +------------------------------------------+ | |
  114. * | | +------------------------------------------+ | |
  115. * | | |TCS1 | | | | | | | | | | | | | | |
  116. * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
  117. * | | | | | | | | | | | | | | | | | |
  118. * | | +------------------------------------------+ | |
  119. * | | +------------------------------------------+ | |
  120. * | | |TCS2 | | | | | | | | | | | | | | |
  121. * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
  122. * | | | | | | | | | | | | | | | | | |
  123. * | | +------------------------------------------+ | |
  124. * | | ...... | |
  125. * | +-----------------------------------------------+ |
  126. * | +-----------------------------------------------+ |
  127. * | |DRV1 | |
  128. * | | (same as DRV0) | |
  129. * | +-----------------------------------------------+ |
  130. * | ...... |
  131. * +---------------------------------------------------+
  132. */
/*
 * Convert a microsecond interval to timer cycles using the same
 * fixed-point scheme as udelay(): 0x10C7 approximates 2^32 / 10^6,
 * and the >> 32 in xloops_to_cycles() undoes that scaling.
 */
#define USECS_TO_CYCLES(time_usecs)			\
	xloops_to_cycles((time_usecs) * 0x10C7UL)

static inline unsigned long xloops_to_cycles(u64 xloops)
{
	return (xloops * loops_per_jiffy * HZ) >> 32;
}
/*
 * Register offsets for RSC hardware version 2.7, indexed by the
 * anonymous enum above. TCS blocks are 672 bytes apart and each
 * command slot within a TCS occupies 20 bytes.
 */
static u32 rpmh_rsc_reg_offset_ver_2_7[] = {
	[RSC_DRV_TCS_OFFSET] =		672,
	[RSC_DRV_CMD_OFFSET] =		20,
	[DRV_SOLVER_CONFIG] =		0x04,
	[DRV_PRNT_CHLD_CONFIG] =	0x0C,
	[RSC_DRV_IRQ_ENABLE] =		0x00,
	[RSC_DRV_IRQ_STATUS] =		0x04,
	[RSC_DRV_IRQ_CLEAR] =		0x08,
	[RSC_DRV_CMD_WAIT_FOR_CMPL] =	0x10,
	[RSC_DRV_CONTROL] =		0x14,
	[RSC_DRV_STATUS] =		0x18,
	[RSC_DRV_CMD_ENABLE] =		0x1C,
	[RSC_DRV_CMD_MSGID] =		0x30,
	[RSC_DRV_CMD_ADDR] =		0x34,
	[RSC_DRV_CMD_DATA] =		0x38,
	[RSC_DRV_CMD_STATUS] =		0x3C,
	[RSC_DRV_CMD_RESP_DATA] =	0x40,
};
/*
 * Register offsets for RSC hardware version 3.0. Same TCS stride as
 * v2.7 but the per-command slot grows to 24 bytes and most registers
 * shift up accordingly.
 */
static u32 rpmh_rsc_reg_offset_ver_3_0[] = {
	[RSC_DRV_TCS_OFFSET] =		672,
	[RSC_DRV_CMD_OFFSET] =		24,
	[DRV_SOLVER_CONFIG] =		0x04,
	[DRV_PRNT_CHLD_CONFIG] =	0x0C,
	[RSC_DRV_IRQ_ENABLE] =		0x00,
	[RSC_DRV_IRQ_STATUS] =		0x04,
	[RSC_DRV_IRQ_CLEAR] =		0x08,
	[RSC_DRV_CMD_WAIT_FOR_CMPL] =	0x20,
	[RSC_DRV_CONTROL] =		0x24,
	[RSC_DRV_STATUS] =		0x28,
	[RSC_DRV_CMD_ENABLE] =		0x2C,
	[RSC_DRV_CMD_MSGID] =		0x34,
	[RSC_DRV_CMD_ADDR] =		0x38,
	[RSC_DRV_CMD_DATA] =		0x3C,
	[RSC_DRV_CMD_STATUS] =		0x40,
	[RSC_DRV_CMD_RESP_DATA] =	0x44,
};
  175. static inline void __iomem *
  176. tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
  177. {
  178. return drv->tcs_base + drv->regs[RSC_DRV_TCS_OFFSET] * tcs_id + reg;
  179. }
  180. static inline void __iomem *
  181. tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
  182. {
  183. return tcs_reg_addr(drv, reg, tcs_id) + drv->regs[RSC_DRV_CMD_OFFSET] * cmd_id;
  184. }
/* Read a per-command TCS register; relaxed since no ordering is needed here. */
static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			int cmd_id)
{
	return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
/* Read a per-TCS register; relaxed since no ordering is needed here. */
static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}
/* Write a per-command TCS register; relaxed, no completion guarantee. */
static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			  int cmd_id, u32 data)
{
	writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
/* Write a per-TCS register; relaxed, no completion guarantee. */
static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
			  u32 data)
{
	writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}
  204. static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
  205. u32 data)
  206. {
  207. int i;
  208. writel(data, tcs_reg_addr(drv, reg, tcs_id));
  209. /*
  210. * Wait until we read back the same value. Use a counter rather than
  211. * ktime for timeout since this may be called after timekeeping stops.
  212. */
  213. for (i = 0; i < USEC_PER_SEC; i++) {
  214. if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
  215. return;
  216. udelay(1);
  217. }
  218. pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
  219. data, tcs_id, reg);
  220. }
  221. /**
  222. * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
  223. * @drv: The RSC controller.
  224. * @type: SLEEP_TCS or WAKE_TCS
  225. *
  226. * This will clear the "slots" variable of the given tcs_group and also
  227. * tell the hardware to forget about all entries.
  228. *
  229. * The caller must ensure that no other RPMH actions are happening when this
  230. * function is called, since otherwise the device may immediately become
  231. * used again even before this function exits.
  232. */
  233. static void tcs_invalidate(struct rsc_drv *drv, int type)
  234. {
  235. int m;
  236. struct tcs_group *tcs = &drv->tcs[type];
  237. /* Caller ensures nobody else is running so no lock */
  238. if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
  239. return;
  240. for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
  241. write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], m, 0);
  242. bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
  243. }
/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 *
 * Context: runs lockless; see tcs_invalidate().
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
	tcs_invalidate(drv, SLEEP_TCS);
	tcs_invalidate(drv, WAKE_TCS);
}
  257. /**
  258. * get_tcs_for_msg() - Get the tcs_group used to send the given message.
  259. * @drv: The RSC controller.
  260. * @msg: The message we want to send.
  261. *
  262. * This is normally pretty straightforward except if we are trying to send
  263. * an ACTIVE_ONLY message but don't have any active_only TCSes.
  264. *
  265. * Return: A pointer to a tcs_group or an ERR_PTR.
  266. */
  267. static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
  268. const struct tcs_request *msg)
  269. {
  270. int type;
  271. struct tcs_group *tcs;
  272. switch (msg->state) {
  273. case RPMH_ACTIVE_ONLY_STATE:
  274. type = ACTIVE_TCS;
  275. break;
  276. case RPMH_WAKE_ONLY_STATE:
  277. type = WAKE_TCS;
  278. break;
  279. case RPMH_SLEEP_STATE:
  280. type = SLEEP_TCS;
  281. break;
  282. default:
  283. return ERR_PTR(-EINVAL);
  284. }
  285. /*
  286. * If we are making an active request on a RSC that does not have a
  287. * dedicated TCS for active state use, then re-purpose a wake TCS to
  288. * send active votes. This is safe because we ensure any active-only
  289. * transfers have finished before we use it (maybe by running from
  290. * the last CPU in PM code).
  291. */
  292. tcs = &drv->tcs[type];
  293. if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
  294. tcs = &drv->tcs[WAKE_TCS];
  295. return tcs;
  296. }
  297. /**
  298. * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
  299. * @drv: The RSC controller.
  300. * @tcs_id: The global ID of this TCS.
  301. *
  302. * For ACTIVE_ONLY transfers we want to call back into the client when the
  303. * transfer finishes. To do this we need the "request" that the client
  304. * originally provided us. This function grabs the request that we stashed
  305. * when we started the transfer.
  306. *
  307. * This only makes sense for ACTIVE_ONLY transfers since those are the only
  308. * ones we track sending (the only ones we enable interrupts for and the only
  309. * ones we call back to the client for).
  310. *
  311. * Return: The stashed request.
  312. */
  313. static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
  314. int tcs_id)
  315. {
  316. struct tcs_group *tcs;
  317. int i;
  318. for (i = 0; i < TCS_TYPE_NR; i++) {
  319. tcs = &drv->tcs[i];
  320. if (tcs->mask & BIT(tcs_id))
  321. return tcs->req[tcs_id - tcs->offset];
  322. }
  323. return NULL;
  324. }
/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger. If false then just untrigger.
 *
 * In the normal case we only ever call with "trigger=true" to start a
 * transfer. That will un-trigger/disable the TCS from the last transfer
 * then trigger/enable for this transfer.
 *
 * If we borrowed a wake TCS for an active-only transfer we'll also call
 * this function with "trigger=false" to just do the un-trigger/disable
 * before using the TCS for wake purposes again.
 *
 * Note that the AP is only in charge of triggering active-only transfers.
 * The AP never triggers sleep/wake values using this function.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
	u32 enable;
	u32 reg = drv->regs[RSC_DRV_CONTROL];

	/*
	 * HW req: Clear the DRV_CONTROL and enable TCS again
	 * While clearing ensure that the AMC mode trigger is cleared
	 * and then the mode enable is cleared.
	 * The two-step sequence (trigger first, then enable) with synchronous
	 * writes is required; do not reorder or merge these writes.
	 */
	enable = read_tcs_reg(drv, reg, tcs_id);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, reg, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, reg, tcs_id, enable);

	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(drv, reg, tcs_id, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		/* Final trigger is a plain write; completion arrives via IRQ */
		write_tcs_reg(drv, reg, tcs_id, enable);
	}
}
  364. /**
  365. * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
  366. * @drv: The controller.
  367. * @tcs_id: The global ID of this TCS.
  368. * @enable: If true then enable; if false then disable
  369. *
  370. * We only ever call this when we borrow a wake TCS for an active-only
  371. * transfer. For active-only TCSes interrupts are always left enabled.
  372. */
  373. static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
  374. {
  375. u32 data;
  376. u32 reg = drv->regs[RSC_DRV_IRQ_ENABLE];
  377. data = readl_relaxed(drv->tcs_base + reg);
  378. if (enable)
  379. data |= BIT(tcs_id);
  380. else
  381. data &= ~BIT(tcs_id);
  382. writel_relaxed(data, drv->tcs_base + reg);
  383. }
/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p:   Pointer to "struct rsc_drv".
 *
 * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
 * IRQ for) when a transfer is done. For each TCS flagged in the IRQ status
 * register: untrigger it, reclaim it (clear CMD_ENABLE, ack the IRQ, drop
 * the tcs_in_use bit), wake any sender waiting for a free TCS, and call the
 * client's completion callback.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i;
	unsigned long irq_status;
	const struct tcs_request *req;

	irq_status = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_STATUS]);

	for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) {
		req = get_req_from_tcs(drv, i);
		if (WARN_ON(!req))
			/* No stashed request: still reclaim the TCS below */
			goto skip;

		trace_rpmh_tx_done(drv, i, req);

		/* Clear AMC trigger & enable modes and
		 * disable interrupt for this TCS
		 */
		__tcs_set_trigger(drv, i, false);
skip:
		/* Reclaim the TCS */
		write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
		writel_relaxed(BIT(i), drv->tcs_base + drv->regs[RSC_DRV_IRQ_CLEAR]);
		spin_lock(&drv->lock);
		clear_bit(i, drv->tcs_in_use);
		/*
		 * Disable interrupt for WAKE TCS to avoid being
		 * spammed with interrupts coming when the solver
		 * sends its wake votes.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			enable_tcs_irq(drv, i, false);
		spin_unlock(&drv->lock);
		/* Wake senders blocked in rpmh_rsc_send_data() first */
		wake_up(&drv->tcs_wait);
		if (req)
			rpmh_tx_done(req);
	}

	return IRQ_HANDLED;
}
  430. /**
  431. * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
  432. * @drv: The controller.
  433. * @tcs_id: The global ID of this TCS.
  434. * @cmd_id: The index within the TCS to start writing.
  435. * @msg: The message we want to send, which will contain several addr/data
  436. * pairs to program (but few enough that they all fit in one TCS).
  437. *
  438. * This is used for all types of transfers (active, sleep, and wake).
  439. */
  440. static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
  441. const struct tcs_request *msg)
  442. {
  443. u32 msgid;
  444. u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
  445. u32 cmd_enable = 0;
  446. struct tcs_cmd *cmd;
  447. int i, j;
  448. /* Convert all commands to RR when the request has wait_for_compl set */
  449. cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
  450. for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
  451. cmd = &msg->cmds[i];
  452. cmd_enable |= BIT(j);
  453. msgid = cmd_msgid;
  454. /*
  455. * Additionally, if the cmd->wait is set, make the command
  456. * response reqd even if the overall request was fire-n-forget.
  457. */
  458. msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
  459. write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_MSGID], tcs_id, j, msgid);
  460. write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], tcs_id, j, cmd->addr);
  461. write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_DATA], tcs_id, j, cmd->data);
  462. trace_rpmh_send_msg(drv, tcs_id, msg->state, j, msgid, cmd);
  463. }
  464. cmd_enable |= read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id);
  465. write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, cmd_enable);
  466. }
  467. /**
  468. * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
  469. * @drv: The controller.
  470. * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
  471. * @msg: The message we want to send, which will contain several addr/data
  472. * pairs to program (but few enough that they all fit in one TCS).
  473. *
  474. * This will walk through the TCSes in the group and check if any of them
  475. * appear to be sending to addresses referenced in the message. If it finds
  476. * one it'll return -EBUSY.
  477. *
  478. * Only for use for active-only transfers.
  479. *
  480. * Must be called with the drv->lock held since that protects tcs_in_use.
  481. *
  482. * Return: 0 if nothing in flight or -EBUSY if we should try again later.
  483. * The caller must re-enable interrupts between tries since that's
  484. * the only way tcs_in_use will ever be updated and the only way
  485. * RSC_DRV_CMD_ENABLE will ever be cleared.
  486. */
  487. static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
  488. const struct tcs_request *msg)
  489. {
  490. unsigned long curr_enabled;
  491. u32 addr;
  492. int j, k;
  493. int i = tcs->offset;
  494. for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) {
  495. curr_enabled = read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i);
  496. for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
  497. addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
  498. for (k = 0; k < msg->num_cmds; k++) {
  499. if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
  500. return -EBUSY;
  501. }
  502. }
  503. }
  504. return 0;
  505. }
  506. /**
  507. * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
  508. * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
  509. * we borrowed it because there are zero active-only ones).
  510. *
  511. * Must be called with the drv->lock held since that protects tcs_in_use.
  512. *
  513. * Return: The first tcs that's free or -EBUSY if all in use.
  514. */
  515. static int find_free_tcs(struct tcs_group *tcs)
  516. {
  517. const struct rsc_drv *drv = tcs->drv;
  518. unsigned long i;
  519. unsigned long max = tcs->offset + tcs->num_tcs;
  520. i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset);
  521. if (i >= max)
  522. return -EBUSY;
  523. return i;
  524. }
  525. /**
  526. * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
  527. * @drv: The controller.
  528. * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
  529. * @msg: The data to be sent.
  530. *
  531. * Claims a tcs in the given tcs_group while making sure that no existing cmd
  532. * is in flight that would conflict with the one in @msg.
  533. *
  534. * Context: Must be called with the drv->lock held since that protects
  535. * tcs_in_use.
  536. *
  537. * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
  538. * or the tcs_group is full.
  539. */
  540. static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
  541. const struct tcs_request *msg)
  542. {
  543. int ret;
  544. /*
  545. * The h/w does not like if we send a request to the same address,
  546. * when one is already in-flight or being processed.
  547. */
  548. ret = check_for_req_inflight(drv, tcs, msg);
  549. if (ret)
  550. return ret;
  551. return find_free_tcs(tcs);
  552. }
/**
 * rpmh_rsc_send_data() - Write / trigger active-only message.
 * @drv: The controller.
 * @msg: The data to be sent.
 *
 * NOTES:
 * - This is only used for "ACTIVE_ONLY" since the limitations of this
 *   function don't make sense for sleep/wake cases.
 * - To do the transfer, we will grab a whole TCS for ourselves--we don't
 *   try to share. If there are none available we'll wait indefinitely
 *   for a free one.
 * - This function will not wait for the commands to be finished, only for
 *   data to be programmed into the RPMh. See rpmh_tx_done() which will
 *   be called when the transfer is fully complete.
 * - This function must be called with interrupts enabled. If the hardware
 *   is busy doing someone else's transfer we need that transfer to fully
 *   finish so that we can have the hardware, and to fully finish it needs
 *   the interrupt handler to run. If the interrupts is set to run on the
 *   active CPU this can never happen if interrupts are disabled.
 *
 * Return: 0 on success, -EINVAL on error.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;

	might_sleep();

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	spin_lock_irq(&drv->lock);

	/* Wait forever for a free tcs. It better be there eventually! */
	wait_event_lock_irq(drv->tcs_wait,
			    (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
			    drv->lock);

	/* Stash the request so tcs_tx_done() can find it for the callback */
	tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
		/*
		 * Clear previously programmed WAKE commands in selected
		 * repurposed TCS to avoid triggering them. tcs->slots will be
		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
		 */
		write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0);
		enable_tcs_irq(drv, tcs_id, true);
	}
	spin_unlock_irq(&drv->lock);

	/*
	 * These two can be done after the lock is released because:
	 * - We marked "tcs_in_use" under lock.
	 * - Once "tcs_in_use" has been marked nobody else could be writing
	 *   to these registers until the interrupt goes off.
	 * - The interrupt can't go off until we trigger w/ the last line
	 *   of __tcs_set_trigger() below.
	 */
	__tcs_buffer_write(drv, tcs_id, 0, msg);
	__tcs_set_trigger(drv, tcs_id, true);

	return 0;
}
/**
 * find_slots() - Find a place to write the given message.
 * @tcs:    The tcs group to search.
 * @msg:    The message we want to find room for.
 * @tcs_id: If we return 0 from the function, we return the global ID of the
 *          TCS to write to here.
 * @cmd_id: If we return 0 from the function, we return the index of
 *          the command array of the returned TCS where the client should
 *          start writing the message.
 *
 * Only for use on sleep/wake TCSes since those are the only ones we maintain
 * tcs->slots for.
 *
 * Return: -ENOMEM if there was no room, else 0.
 */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
		      int *tcs_id, int *cmd_id)
{
	int slot, offset;
	int i = 0;

	/*
	 * Do over, until we can fit the full payload in a single TCS.
	 * "i" advances one TCS-worth (ncpt slots) per iteration; the loop
	 * condition rejects a candidate that would straddle a TCS boundary
	 * and retries the search from the next TCS.
	 */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						  i, msg->num_cmds, 0);
		if (slot >= tcs->num_tcs * tcs->ncpt)
			return -ENOMEM;
		i += tcs->ncpt;
	} while (slot + msg->num_cmds - 1 >= i);

	bitmap_set(tcs->slots, slot, msg->num_cmds);

	/* Translate the flat slot index into (TCS id, command index) */
	offset = slot / tcs->ncpt;
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;

	return 0;
}
  646. /**
  647. * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
  648. * @drv: The controller.
  649. * @msg: The data to be written to the controller.
  650. *
  651. * This should only be called for sleep/wake state, never active-only
  652. * state.
  653. *
  654. * The caller must ensure that no other RPMH actions are happening and the
  655. * controller is idle when this function is called since it runs lockless.
  656. *
  657. * Return: 0 if no error; else -error.
  658. */
  659. int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
  660. {
  661. struct tcs_group *tcs;
  662. int tcs_id = 0, cmd_id = 0;
  663. int ret;
  664. tcs = get_tcs_for_msg(drv, msg);
  665. if (IS_ERR(tcs))
  666. return PTR_ERR(tcs);
  667. /* find the TCS id and the command in the TCS to write to */
  668. ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
  669. if (!ret)
  670. __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
  671. return ret;
  672. }
  673. /**
  674. * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
  675. * @drv: The controller
  676. *
  677. * Checks if any of the AMCs are busy in handling ACTIVE sets.
  678. * This is called from the last cpu powering down before flushing
  679. * SLEEP and WAKE sets. If AMCs are busy, controller can not enter
  680. * power collapse, so deny from the last cpu's pm notification.
  681. *
  682. * Context: Must be called with the drv->lock held.
  683. *
  684. * Return:
  685. * * False - AMCs are idle
  686. * * True - AMCs are busy
  687. */
  688. static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
  689. {
  690. unsigned long set;
  691. const struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
  692. unsigned long max;
  693. /*
  694. * If we made an active request on a RSC that does not have a
  695. * dedicated TCS for active state use, then re-purposed wake TCSes
  696. * should be checked for not busy, because we used wake TCSes for
  697. * active requests in this case.
  698. */
  699. if (!tcs->num_tcs)
  700. tcs = &drv->tcs[WAKE_TCS];
  701. max = tcs->offset + tcs->num_tcs;
  702. set = find_next_bit(drv->tcs_in_use, max, tcs->offset);
  703. return set < max;
  704. }
  705. /**
  706. * rpmh_rsc_write_next_wakeup() - Write next wakeup in CONTROL_TCS.
  707. * @drv: The controller
  708. *
  709. * Writes maximum wakeup cycles when called from suspend.
  710. * Writes earliest hrtimer wakeup when called from idle.
  711. */
  712. void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv)
  713. {
  714. ktime_t now, wakeup;
  715. u64 wakeup_us, wakeup_cycles = ~0;
  716. u32 lo, hi;
  717. if (!drv->tcs[CONTROL_TCS].num_tcs || !drv->genpd_nb.notifier_call)
  718. return;
  719. /* Set highest time when system (timekeeping) is suspended */
  720. if (system_state == SYSTEM_SUSPEND)
  721. goto exit;
  722. /* Find the earliest hrtimer wakeup from online cpus */
  723. wakeup = dev_pm_genpd_get_next_hrtimer(drv->dev);
  724. /* Find the relative wakeup in kernel time scale */
  725. now = ktime_get();
  726. wakeup = ktime_sub(wakeup, now);
  727. wakeup_us = ktime_to_us(wakeup);
  728. /* Convert the wakeup to arch timer scale */
  729. wakeup_cycles = USECS_TO_CYCLES(wakeup_us);
  730. wakeup_cycles += arch_timer_read_counter();
  731. exit:
  732. lo = wakeup_cycles & RSC_DRV_CTL_TCS_DATA_LO_MASK;
  733. hi = wakeup_cycles >> RSC_DRV_CTL_TCS_DATA_SIZE;
  734. hi &= RSC_DRV_CTL_TCS_DATA_HI_MASK;
  735. hi |= RSC_DRV_CTL_TCS_DATA_HI_VALID;
  736. writel_relaxed(lo, drv->base + RSC_DRV_CTL_TCS_DATA_LO);
  737. writel_relaxed(hi, drv->base + RSC_DRV_CTL_TCS_DATA_HI);
  738. }
/**
 * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
 * @nfb: Pointer to the notifier block in struct rsc_drv.
 * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
 * @v: Unused
 *
 * This function is given to cpu_pm_register_notifier so we can be informed
 * about when CPUs go down. When all CPUs go down we know no more active
 * transfers will be started so we write sleep/wake sets. This function gets
 * called from cpuidle code paths and also at system suspend time.
 *
 * If its last CPU going down and AMCs are not busy then writes cached sleep
 * and wake messages to TCSes. The firmware then takes care of triggering
 * them when entering deepest low power modes.
 *
 * Return: See cpu_pm_register_notifier()
 */
static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
				    unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
	int ret = NOTIFY_OK;
	int cpus_in_pm;

	switch (action) {
	case CPU_PM_ENTER:
		/* Count this CPU among those entering low power */
		cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
		/*
		 * NOTE: comments for num_online_cpus() point out that it's
		 * only a snapshot so we need to be careful. It should be OK
		 * for us to use, though.  It's important for us not to miss
		 * if we're the last CPU going down so it would only be a
		 * problem if a CPU went offline right after we did the check
		 * AND that CPU was not idle AND that CPU was the last non-idle
		 * CPU. That can't happen. CPUs would have to come out of idle
		 * before the CPU could go offline.
		 */
		if (cpus_in_pm < num_online_cpus())
			return NOTIFY_OK;
		/* Last CPU going down: fall through to the flush below */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* A CPU is (back) up; undo the CPU_PM_ENTER accounting */
		atomic_dec(&drv->cpus_in_pm);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}

	/*
	 * It's likely we're on the last CPU. Grab the drv->lock and write
	 * out the sleep/wake commands to RPMH hardware. Grabbing the lock
	 * means that if we race with another CPU coming up we are still
	 * guaranteed to be safe. If another CPU came up just after we checked
	 * and has grabbed the lock or started an active transfer then we'll
	 * notice we're busy and abort. If another CPU comes up after we start
	 * flushing it will be blocked from starting an active transfer until
	 * we're done flushing. If another CPU starts an active transfer after
	 * we release the lock we're still OK because we're no longer the last
	 * CPU.
	 */
	if (spin_trylock(&drv->lock)) {
		/* Busy AMCs or a failed flush both veto power collapse */
		if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
			ret = NOTIFY_BAD;
		spin_unlock(&drv->lock);
	} else {
		/* Another CPU must be up */
		return NOTIFY_OK;
	}

	if (ret == NOTIFY_BAD) {
		/* Double-check if we're here because someone else is up */
		if (cpus_in_pm < num_online_cpus())
			ret = NOTIFY_OK;
		else
			/* We won't be called w/ CPU_PM_ENTER_FAILED */
			atomic_dec(&drv->cpus_in_pm);
	}

	return ret;
}
  815. /**
  816. * rpmh_rsc_pd_callback() - Check if any of the AMCs are busy.
  817. * @nfb: Pointer to the genpd notifier block in struct rsc_drv.
  818. * @action: GENPD_NOTIFY_PRE_OFF, GENPD_NOTIFY_OFF, GENPD_NOTIFY_PRE_ON or GENPD_NOTIFY_ON.
  819. * @v: Unused
  820. *
  821. * This function is given to dev_pm_genpd_add_notifier() so we can be informed
  822. * about when cluster-pd is going down. When cluster go down we know no more active
  823. * transfers will be started so we write sleep/wake sets. This function gets
  824. * called from cpuidle code paths and also at system suspend time.
  825. *
  826. * If AMCs are not busy then writes cached sleep and wake messages to TCSes.
  827. * The firmware then takes care of triggering them when entering deepest low power modes.
  828. *
  829. * Return:
  830. * * NOTIFY_OK - success
  831. * * NOTIFY_BAD - failure
  832. */
  833. static int rpmh_rsc_pd_callback(struct notifier_block *nfb,
  834. unsigned long action, void *v)
  835. {
  836. struct rsc_drv *drv = container_of(nfb, struct rsc_drv, genpd_nb);
  837. /* We don't need to lock as genpd on/off are serialized */
  838. if ((action == GENPD_NOTIFY_PRE_OFF) &&
  839. (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client)))
  840. return NOTIFY_BAD;
  841. return NOTIFY_OK;
  842. }
  843. static int rpmh_rsc_pd_attach(struct rsc_drv *drv, struct device *dev)
  844. {
  845. int ret;
  846. pm_runtime_enable(dev);
  847. drv->genpd_nb.notifier_call = rpmh_rsc_pd_callback;
  848. ret = dev_pm_genpd_add_notifier(dev, &drv->genpd_nb);
  849. if (ret)
  850. pm_runtime_disable(dev);
  851. return ret;
  852. }
  853. static int rpmh_probe_tcs_config(struct platform_device *pdev, struct rsc_drv *drv)
  854. {
  855. struct tcs_type_config {
  856. u32 type;
  857. u32 n;
  858. } tcs_cfg[TCS_TYPE_NR] = { { 0 } };
  859. struct device_node *dn = pdev->dev.of_node;
  860. u32 config, max_tcs, ncpt, offset;
  861. int i, ret, n, st = 0;
  862. struct tcs_group *tcs;
  863. ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
  864. if (ret)
  865. return ret;
  866. drv->tcs_base = drv->base + offset;
  867. config = readl_relaxed(drv->base + drv->regs[DRV_PRNT_CHLD_CONFIG]);
  868. max_tcs = config;
  869. max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
  870. max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
  871. ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
  872. ncpt = ncpt >> DRV_NCPT_SHIFT;
  873. n = of_property_count_u32_elems(dn, "qcom,tcs-config");
  874. if (n != 2 * TCS_TYPE_NR)
  875. return -EINVAL;
  876. for (i = 0; i < TCS_TYPE_NR; i++) {
  877. ret = of_property_read_u32_index(dn, "qcom,tcs-config",
  878. i * 2, &tcs_cfg[i].type);
  879. if (ret)
  880. return ret;
  881. if (tcs_cfg[i].type >= TCS_TYPE_NR)
  882. return -EINVAL;
  883. ret = of_property_read_u32_index(dn, "qcom,tcs-config",
  884. i * 2 + 1, &tcs_cfg[i].n);
  885. if (ret)
  886. return ret;
  887. if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
  888. return -EINVAL;
  889. }
  890. for (i = 0; i < TCS_TYPE_NR; i++) {
  891. tcs = &drv->tcs[tcs_cfg[i].type];
  892. if (tcs->drv)
  893. return -EINVAL;
  894. tcs->drv = drv;
  895. tcs->type = tcs_cfg[i].type;
  896. tcs->num_tcs = tcs_cfg[i].n;
  897. tcs->ncpt = ncpt;
  898. if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
  899. continue;
  900. if (st + tcs->num_tcs > max_tcs ||
  901. st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
  902. return -EINVAL;
  903. tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
  904. tcs->offset = st;
  905. st += tcs->num_tcs;
  906. }
  907. drv->num_tcs = st;
  908. return 0;
  909. }
/*
 * Probe one RSC DRV: map registers, parse the TCS layout from DT, request
 * the TX-done interrupt, register CPU-PM or genpd notifiers (unless the
 * controller runs in HW-solver mode), and populate child devices.
 */
static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	char drv_id[10] = {0};
	int ret, irq;
	u32 solver_config;
	u32 rsc_id;

	/*
	 * Even though RPMh doesn't directly use cmd-db, all of its children
	 * do. To avoid adding this check to our children we'll do it now.
	 */
	ret = cmd_db_ready();
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Command DB not available (%d)\n",
									ret);
		return ret;
	}

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
	if (ret)
		return ret;

	drv->name = of_get_property(dn, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	/* Register region is named after the DRV id, e.g. "drv-2" */
	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
	drv->base = devm_platform_ioremap_resource_byname(pdev, drv_id);
	if (IS_ERR(drv->base))
		return PTR_ERR(drv->base);

	/* The hardware version selects which register-offset table to use */
	rsc_id = readl_relaxed(drv->base + RSC_DRV_ID);
	drv->ver.major = rsc_id & (MAJOR_VER_MASK << MAJOR_VER_SHIFT);
	drv->ver.major >>= MAJOR_VER_SHIFT;
	drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
	drv->ver.minor >>= MINOR_VER_SHIFT;

	if (drv->ver.major >= 3)
		drv->regs = rpmh_rsc_reg_offset_ver_3_0;
	else
		drv->regs = rpmh_rsc_reg_offset_ver_2_7;

	ret = rpmh_probe_tcs_config(pdev, drv);
	if (ret)
		return ret;

	spin_lock_init(&drv->lock);
	init_waitqueue_head(&drv->tcs_wait);
	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

	/* TX-done interrupt; index within the device matches the DRV id */
	irq = platform_get_irq(pdev, drv->id);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       drv->name, drv);
	if (ret)
		return ret;

	/*
	 * CPU PM/genpd notification are not required for controllers that support
	 * 'HW solver' mode where they can be in autonomous mode executing low
	 * power mode to power down.
	 */
	solver_config = readl_relaxed(drv->base + drv->regs[DRV_SOLVER_CONFIG]);
	solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
	if (!solver_config) {
		/* Prefer a genpd notifier when a PM domain is attached */
		if (pdev->dev.pm_domain) {
			ret = rpmh_rsc_pd_attach(drv, &pdev->dev);
			if (ret)
				return ret;
		} else {
			drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
			cpu_pm_register_notifier(&drv->rsc_pm);
		}
	}

	/* Enable the active TCS to send requests immediately */
	writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
		       drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);

	spin_lock_init(&drv->client.cache_lock);
	INIT_LIST_HEAD(&drv->client.cache);
	INIT_LIST_HEAD(&drv->client.batch_cache);

	dev_set_drvdata(&pdev->dev, drv);
	drv->dev = &pdev->dev;

	/* Create the child devices (clients of this RSC) described in DT */
	ret = devm_of_platform_populate(&pdev->dev);
	if (ret && pdev->dev.pm_domain) {
		/* Undo rpmh_rsc_pd_attach() on failure */
		dev_pm_genpd_remove_notifier(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
	}

	return ret;
}
/* Devicetree match table; also exported for module autoloading. */
static const struct of_device_id rpmh_drv_match[] = {
	{ .compatible = "qcom,rpmh-rsc", },
	{ }
};
MODULE_DEVICE_TABLE(of, rpmh_drv_match);
static struct platform_driver rpmh_driver = {
	.probe = rpmh_rsc_probe,
	.driver = {
		  .name = "rpmh",
		  .of_match_table = rpmh_drv_match,
		  /* No .remove: disallow manual unbind via sysfs */
		  .suppress_bind_attrs = true,
	},
};
/* Registered at core_initcall so the RSC is up before its client drivers. */
static int __init rpmh_driver_init(void)
{
	return platform_driver_register(&rpmh_driver);
}
core_initcall(rpmh_driver_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
MODULE_LICENSE("GPL v2");