ti_sci.c 84 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
29442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Texas Instruments System Control Interface Protocol Driver
  4. * Based on drivers/firmware/ti_sci.c from Linux.
  5. *
  6. * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
  7. * Lokesh Vutla <lokeshvutla@ti.com>
  8. */
  9. #include <common.h>
  10. #include <dm.h>
  11. #include <errno.h>
  12. #include <log.h>
  13. #include <mailbox.h>
  14. #include <malloc.h>
  15. #include <dm/device.h>
  16. #include <dm/device_compat.h>
  17. #include <dm/devres.h>
  18. #include <linux/bitops.h>
  19. #include <linux/compat.h>
  20. #include <linux/err.h>
  21. #include <linux/soc/ti/k3-sec-proxy.h>
  22. #include <linux/soc/ti/ti_sci_protocol.h>
  23. #include "ti_sci.h"
  24. #include "ti_sci_static_data.h"
  25. /* List of all TI SCI devices active in system */
  26. static LIST_HEAD(ti_sci_list);
  27. /**
  28. * struct ti_sci_xfer - Structure representing a message flow
  29. * @tx_message: Transmit message
  30. * @rx_len: Receive message length
  31. */
  32. struct ti_sci_xfer {
  33. struct k3_sec_proxy_msg tx_message;
  34. u8 rx_len;
  35. };
  36. /**
  37. * struct ti_sci_rm_type_map - Structure representing TISCI Resource
  38. * management representation of dev_ids.
  39. * @dev_id: TISCI device ID
  40. * @type: Corresponding id as identified by TISCI RM.
  41. *
  42. * Note: This is used only as a work around for using RM range apis
  43. * for AM654 SoC. For future SoCs dev_id will be used as type
  44. * for RM range APIs. In order to maintain ABI backward compatibility
  45. * type is not being changed for AM654 SoC.
  46. */
  47. struct ti_sci_rm_type_map {
  48. u32 dev_id;
  49. u16 type;
  50. };
  51. /**
  52. * struct ti_sci_desc - Description of SoC integration
  53. * @default_host_id: Host identifier representing the compute entity
  54. * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
  55. * @max_msgs: Maximum number of messages that can be pending
  56. * simultaneously in the system
  57. * @max_msg_size: Maximum size of data per message that can be handled.
  58. */
  59. struct ti_sci_desc {
  60. u8 default_host_id;
  61. int max_rx_timeout_ms;
  62. int max_msgs;
  63. int max_msg_size;
  64. };
  65. /**
  66. * struct ti_sci_info - Structure representing a TI SCI instance
  67. * @dev: Device pointer
  68. * @desc: SoC description for this instance
  69. * @handle: Instance of TI SCI handle to send to clients.
  70. * @chan_tx: Transmit mailbox channel
  71. * @chan_rx: Receive mailbox channel
  72. * @xfer: xfer info
  73. * @list: list head
  74. * @is_secure: Determines if the communication is through secure threads.
  75. * @host_id: Host identifier representing the compute entity
  76. * @seq: Seq id used for verification for tx and rx message.
  77. */
  78. struct ti_sci_info {
  79. struct udevice *dev;
  80. const struct ti_sci_desc *desc;
  81. struct ti_sci_handle handle;
  82. struct mbox_chan chan_tx;
  83. struct mbox_chan chan_rx;
  84. struct mbox_chan chan_notify;
  85. struct ti_sci_xfer xfer;
  86. struct list_head list;
  87. struct list_head dev_list;
  88. bool is_secure;
  89. u8 host_id;
  90. u8 seq;
  91. };
  92. struct ti_sci_exclusive_dev {
  93. u32 id;
  94. u32 count;
  95. struct list_head list;
  96. };
  97. #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
  98. /**
  99. * ti_sci_setup_one_xfer() - Setup one message type
  100. * @info: Pointer to SCI entity information
  101. * @msg_type: Message type
  102. * @msg_flags: Flag to set for the message
  103. * @buf: Buffer to be send to mailbox channel
  104. * @tx_message_size: transmit message size
  105. * @rx_message_size: receive message size. may be set to zero for send-only
  106. * transactions.
  107. *
  108. * Helper function which is used by various command functions that are
  109. * exposed to clients of this driver for allocating a message traffic event.
  110. *
  111. * Return: Corresponding ti_sci_xfer pointer if all went fine,
  112. * else appropriate error pointer.
  113. */
  114. static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
  115. u16 msg_type, u32 msg_flags,
  116. u32 *buf,
  117. size_t tx_message_size,
  118. size_t rx_message_size)
  119. {
  120. struct ti_sci_xfer *xfer = &info->xfer;
  121. struct ti_sci_msg_hdr *hdr;
  122. /* Ensure we have sane transfer sizes */
  123. if (rx_message_size > info->desc->max_msg_size ||
  124. tx_message_size > info->desc->max_msg_size ||
  125. (rx_message_size > 0 && rx_message_size < sizeof(*hdr)) ||
  126. tx_message_size < sizeof(*hdr)) {
  127. dev_err(info->dev, "TI-SCI message transfer size not sane\n");
  128. return ERR_PTR(-ERANGE);
  129. }
  130. info->seq = ~info->seq;
  131. xfer->tx_message.buf = buf;
  132. xfer->tx_message.len = tx_message_size;
  133. xfer->rx_len = (u8)rx_message_size;
  134. hdr = (struct ti_sci_msg_hdr *)buf;
  135. hdr->seq = info->seq;
  136. hdr->type = msg_type;
  137. hdr->host = info->host_id;
  138. hdr->flags = msg_flags;
  139. return xfer;
  140. }
  141. /**
  142. * ti_sci_get_response() - Receive response from mailbox channel
  143. * @info: Pointer to SCI entity information
  144. * @xfer: Transfer to initiate and wait for response
  145. * @chan: Channel to receive the response
  146. *
  147. * Return: -ETIMEDOUT in case of no response, if transmit error,
  148. * return corresponding error, else if all goes well,
  149. * return 0.
  150. */
  151. static int ti_sci_get_response(struct ti_sci_info *info,
  152. struct ti_sci_xfer *xfer,
  153. struct mbox_chan *chan)
  154. {
  155. struct k3_sec_proxy_msg *msg = &xfer->tx_message;
  156. struct ti_sci_secure_msg_hdr *secure_hdr;
  157. struct ti_sci_msg_hdr *hdr;
  158. int ret;
  159. /* Receive the response */
  160. ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
  161. if (ret) {
  162. dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
  163. __func__, ret);
  164. return ret;
  165. }
  166. /* ToDo: Verify checksum */
  167. if (info->is_secure) {
  168. secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
  169. msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
  170. }
  171. /* msg is updated by mailbox driver */
  172. hdr = (struct ti_sci_msg_hdr *)msg->buf;
  173. /* Sanity check for message response */
  174. if (hdr->seq != info->seq) {
  175. dev_dbg(info->dev, "%s: Message for %d is not expected\n",
  176. __func__, hdr->seq);
  177. return ret;
  178. }
  179. if (msg->len > info->desc->max_msg_size) {
  180. dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
  181. __func__, msg->len, info->desc->max_msg_size);
  182. return -EINVAL;
  183. }
  184. if (msg->len < xfer->rx_len) {
  185. dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
  186. __func__, msg->len, xfer->rx_len);
  187. }
  188. return ret;
  189. }
  190. /**
  191. * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
  192. * @r: pointer to response buffer
  193. *
  194. * Return: true if the response was an ACK, else returns false.
  195. */
  196. static bool ti_sci_is_response_ack(void *r)
  197. {
  198. struct ti_sci_msg_hdr *hdr = r;
  199. return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
  200. }
  201. /**
  202. * ti_sci_do_xfer() - Do one transfer
  203. * @info: Pointer to SCI entity information
  204. * @xfer: Transfer to initiate and wait for response
  205. *
  206. * Return: 0 if all went fine, else return appropriate error.
  207. */
  208. static int ti_sci_do_xfer(struct ti_sci_info *info,
  209. struct ti_sci_xfer *xfer)
  210. {
  211. struct k3_sec_proxy_msg *msg = &xfer->tx_message;
  212. u8 secure_buf[info->desc->max_msg_size];
  213. struct ti_sci_secure_msg_hdr secure_hdr;
  214. int ret;
  215. if (info->is_secure) {
  216. /* ToDo: get checksum of the entire message */
  217. secure_hdr.checksum = 0;
  218. secure_hdr.reserved = 0;
  219. memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
  220. xfer->tx_message.len);
  221. xfer->tx_message.buf = (u32 *)secure_buf;
  222. xfer->tx_message.len += sizeof(secure_hdr);
  223. if (xfer->rx_len)
  224. xfer->rx_len += sizeof(secure_hdr);
  225. }
  226. /* Send the message */
  227. ret = mbox_send(&info->chan_tx, msg);
  228. if (ret) {
  229. dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
  230. __func__, ret);
  231. return ret;
  232. }
  233. /* Get response if requested */
  234. if (xfer->rx_len) {
  235. ret = ti_sci_get_response(info, xfer, &info->chan_rx);
  236. if (!ti_sci_is_response_ack(xfer->tx_message.buf)) {
  237. dev_err(info->dev, "Message not acknowledged\n");
  238. ret = -ENODEV;
  239. }
  240. }
  241. return ret;
  242. }
  243. /**
  244. * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
  245. * @handle: pointer to TI SCI handle
  246. *
  247. * Updates the SCI information in the internal data structure.
  248. *
  249. * Return: 0 if all went fine, else return appropriate error.
  250. */
  251. static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
  252. {
  253. struct ti_sci_msg_resp_version *rev_info;
  254. struct ti_sci_version_info *ver;
  255. struct ti_sci_msg_hdr hdr;
  256. struct ti_sci_info *info;
  257. struct ti_sci_xfer *xfer;
  258. int ret;
  259. if (IS_ERR(handle))
  260. return PTR_ERR(handle);
  261. if (!handle)
  262. return -EINVAL;
  263. info = handle_to_ti_sci_info(handle);
  264. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
  265. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  266. (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
  267. sizeof(*rev_info));
  268. if (IS_ERR(xfer)) {
  269. ret = PTR_ERR(xfer);
  270. return ret;
  271. }
  272. ret = ti_sci_do_xfer(info, xfer);
  273. if (ret)
  274. return ret;
  275. rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
  276. ver = &handle->version;
  277. ver->abi_major = rev_info->abi_major;
  278. ver->abi_minor = rev_info->abi_minor;
  279. ver->firmware_revision = rev_info->firmware_revision;
  280. strncpy(ver->firmware_description, rev_info->firmware_description,
  281. sizeof(ver->firmware_description));
  282. return 0;
  283. }
  284. /**
  285. * cmd_set_board_config_using_msg() - Common command to send board configuration
  286. * message
  287. * @handle: pointer to TI SCI handle
  288. * @msg_type: One of the TISCI message types to set board configuration
  289. * @addr: Address where the board config structure is located
  290. * @size: Size of the board config structure
  291. *
  292. * Return: 0 if all went well, else returns appropriate error value.
  293. */
  294. static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
  295. u16 msg_type, u64 addr, u32 size)
  296. {
  297. struct ti_sci_msg_board_config req;
  298. struct ti_sci_msg_hdr *resp;
  299. struct ti_sci_info *info;
  300. struct ti_sci_xfer *xfer;
  301. int ret = 0;
  302. if (IS_ERR(handle))
  303. return PTR_ERR(handle);
  304. if (!handle)
  305. return -EINVAL;
  306. info = handle_to_ti_sci_info(handle);
  307. xfer = ti_sci_setup_one_xfer(info, msg_type,
  308. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  309. (u32 *)&req, sizeof(req), sizeof(*resp));
  310. if (IS_ERR(xfer)) {
  311. ret = PTR_ERR(xfer);
  312. return ret;
  313. }
  314. req.boardcfgp_high = (addr >> 32) & 0xffffffff;
  315. req.boardcfgp_low = addr & 0xffffffff;
  316. req.boardcfg_size = size;
  317. ret = ti_sci_do_xfer(info, xfer);
  318. if (ret)
  319. return ret;
  320. return ret;
  321. }
  322. /**
  323. * ti_sci_cmd_set_board_config() - Command to send board configuration message
  324. * @handle: pointer to TI SCI handle
  325. * @addr: Address where the board config structure is located
  326. * @size: Size of the board config structure
  327. *
  328. * Return: 0 if all went well, else returns appropriate error value.
  329. */
  330. static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
  331. u64 addr, u32 size)
  332. {
  333. return cmd_set_board_config_using_msg(handle,
  334. TI_SCI_MSG_BOARD_CONFIG,
  335. addr, size);
  336. }
  337. /**
  338. * ti_sci_cmd_set_board_config_rm() - Command to send board resource
  339. * management configuration
  340. * @handle: pointer to TI SCI handle
  341. * @addr: Address where the board RM config structure is located
  342. * @size: Size of the RM config structure
  343. *
  344. * Return: 0 if all went well, else returns appropriate error value.
  345. */
  346. static
  347. int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
  348. u64 addr, u32 size)
  349. {
  350. return cmd_set_board_config_using_msg(handle,
  351. TI_SCI_MSG_BOARD_CONFIG_RM,
  352. addr, size);
  353. }
  354. /**
  355. * ti_sci_cmd_set_board_config_security() - Command to send board security
  356. * configuration message
  357. * @handle: pointer to TI SCI handle
  358. * @addr: Address where the board security config structure is located
  359. * @size: Size of the security config structure
  360. *
  361. * Return: 0 if all went well, else returns appropriate error value.
  362. */
  363. static
  364. int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
  365. u64 addr, u32 size)
  366. {
  367. return cmd_set_board_config_using_msg(handle,
  368. TI_SCI_MSG_BOARD_CONFIG_SECURITY,
  369. addr, size);
  370. }
  371. /**
  372. * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
  373. * configuration message
  374. * @handle: pointer to TI SCI handle
  375. * @addr: Address where the board PM config structure is located
  376. * @size: Size of the PM config structure
  377. *
  378. * Return: 0 if all went well, else returns appropriate error value.
  379. */
  380. static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
  381. u64 addr, u32 size)
  382. {
  383. return cmd_set_board_config_using_msg(handle,
  384. TI_SCI_MSG_BOARD_CONFIG_PM,
  385. addr, size);
  386. }
  387. static struct ti_sci_exclusive_dev
  388. *ti_sci_get_exclusive_dev(struct list_head *dev_list, u32 id)
  389. {
  390. struct ti_sci_exclusive_dev *dev;
  391. list_for_each_entry(dev, dev_list, list)
  392. if (dev->id == id)
  393. return dev;
  394. return NULL;
  395. }
  396. static void ti_sci_add_exclusive_dev(struct ti_sci_info *info, u32 id)
  397. {
  398. struct ti_sci_exclusive_dev *dev;
  399. dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
  400. if (dev) {
  401. dev->count++;
  402. return;
  403. }
  404. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  405. dev->id = id;
  406. dev->count = 1;
  407. INIT_LIST_HEAD(&dev->list);
  408. list_add_tail(&dev->list, &info->dev_list);
  409. }
  410. static void ti_sci_delete_exclusive_dev(struct ti_sci_info *info, u32 id)
  411. {
  412. struct ti_sci_exclusive_dev *dev;
  413. dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
  414. if (!dev)
  415. return;
  416. if (dev->count > 0)
  417. dev->count--;
  418. }
  419. /**
  420. * ti_sci_set_device_state() - Set device state helper
  421. * @handle: pointer to TI SCI handle
  422. * @id: Device identifier
  423. * @flags: flags to setup for the device
  424. * @state: State to move the device to
  425. *
  426. * Return: 0 if all went well, else returns appropriate error value.
  427. */
  428. static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
  429. u32 id, u32 flags, u8 state)
  430. {
  431. struct ti_sci_msg_req_set_device_state req;
  432. struct ti_sci_msg_hdr *resp;
  433. struct ti_sci_info *info;
  434. struct ti_sci_xfer *xfer;
  435. int ret = 0;
  436. if (IS_ERR(handle))
  437. return PTR_ERR(handle);
  438. if (!handle)
  439. return -EINVAL;
  440. info = handle_to_ti_sci_info(handle);
  441. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
  442. flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  443. (u32 *)&req, sizeof(req), sizeof(*resp));
  444. if (IS_ERR(xfer)) {
  445. ret = PTR_ERR(xfer);
  446. return ret;
  447. }
  448. req.id = id;
  449. req.state = state;
  450. ret = ti_sci_do_xfer(info, xfer);
  451. if (ret)
  452. return ret;
  453. if (state == MSG_DEVICE_SW_STATE_AUTO_OFF)
  454. ti_sci_delete_exclusive_dev(info, id);
  455. else if (flags & MSG_FLAG_DEVICE_EXCLUSIVE)
  456. ti_sci_add_exclusive_dev(info, id);
  457. return ret;
  458. }
  459. /**
  460. * ti_sci_set_device_state_no_wait() - Set device state helper without
  461. * requesting or waiting for a response.
  462. * @handle: pointer to TI SCI handle
  463. * @id: Device identifier
  464. * @flags: flags to setup for the device
  465. * @state: State to move the device to
  466. *
  467. * Return: 0 if all went well, else returns appropriate error value.
  468. */
  469. static int ti_sci_set_device_state_no_wait(const struct ti_sci_handle *handle,
  470. u32 id, u32 flags, u8 state)
  471. {
  472. struct ti_sci_msg_req_set_device_state req;
  473. struct ti_sci_info *info;
  474. struct ti_sci_xfer *xfer;
  475. int ret = 0;
  476. if (IS_ERR(handle))
  477. return PTR_ERR(handle);
  478. if (!handle)
  479. return -EINVAL;
  480. info = handle_to_ti_sci_info(handle);
  481. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
  482. flags | TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
  483. (u32 *)&req, sizeof(req), 0);
  484. if (IS_ERR(xfer)) {
  485. ret = PTR_ERR(xfer);
  486. return ret;
  487. }
  488. req.id = id;
  489. req.state = state;
  490. ret = ti_sci_do_xfer(info, xfer);
  491. if (ret)
  492. return ret;
  493. return ret;
  494. }
  495. /**
  496. * ti_sci_get_device_state() - Get device state helper
  497. * @handle: Handle to the device
  498. * @id: Device Identifier
  499. * @clcnt: Pointer to Context Loss Count
  500. * @resets: pointer to resets
  501. * @p_state: pointer to p_state
  502. * @c_state: pointer to c_state
  503. *
  504. * Return: 0 if all went fine, else return appropriate error.
  505. */
  506. static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
  507. u32 id, u32 *clcnt, u32 *resets,
  508. u8 *p_state, u8 *c_state)
  509. {
  510. struct ti_sci_msg_resp_get_device_state *resp;
  511. struct ti_sci_msg_req_get_device_state req;
  512. struct ti_sci_info *info;
  513. struct ti_sci_xfer *xfer;
  514. int ret = 0;
  515. if (IS_ERR(handle))
  516. return PTR_ERR(handle);
  517. if (!handle)
  518. return -EINVAL;
  519. if (!clcnt && !resets && !p_state && !c_state)
  520. return -EINVAL;
  521. info = handle_to_ti_sci_info(handle);
  522. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
  523. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  524. (u32 *)&req, sizeof(req), sizeof(*resp));
  525. if (IS_ERR(xfer)) {
  526. ret = PTR_ERR(xfer);
  527. return ret;
  528. }
  529. req.id = id;
  530. ret = ti_sci_do_xfer(info, xfer);
  531. if (ret)
  532. return ret;
  533. resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
  534. if (clcnt)
  535. *clcnt = resp->context_loss_count;
  536. if (resets)
  537. *resets = resp->resets;
  538. if (p_state)
  539. *p_state = resp->programmed_state;
  540. if (c_state)
  541. *c_state = resp->current_state;
  542. return ret;
  543. }
  544. /**
  545. * ti_sci_cmd_get_device() - command to request for device managed by TISCI
  546. * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
  547. * @id: Device Identifier
  548. *
  549. * Request for the device - NOTE: the client MUST maintain integrity of
  550. * usage count by balancing get_device with put_device. No refcounting is
  551. * managed by driver for that purpose.
  552. *
  553. * NOTE: The request is for exclusive access for the processor.
  554. *
  555. * Return: 0 if all went fine, else return appropriate error.
  556. */
  557. static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
  558. {
  559. return ti_sci_set_device_state(handle, id, 0,
  560. MSG_DEVICE_SW_STATE_ON);
  561. }
  562. static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
  563. u32 id)
  564. {
  565. return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
  566. MSG_DEVICE_SW_STATE_ON);
  567. }
  568. /**
  569. * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
  570. * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
  571. * @id: Device Identifier
  572. *
  573. * Request for the device - NOTE: the client MUST maintain integrity of
  574. * usage count by balancing get_device with put_device. No refcounting is
  575. * managed by driver for that purpose.
  576. *
  577. * Return: 0 if all went fine, else return appropriate error.
  578. */
  579. static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
  580. {
  581. return ti_sci_set_device_state(handle, id,
  582. 0,
  583. MSG_DEVICE_SW_STATE_RETENTION);
  584. }
  585. static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
  586. u32 id)
  587. {
  588. return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
  589. MSG_DEVICE_SW_STATE_RETENTION);
  590. }
  591. /**
  592. * ti_sci_cmd_put_device() - command to release a device managed by TISCI
  593. * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
  594. * @id: Device Identifier
  595. *
  596. * Request for the device - NOTE: the client MUST maintain integrity of
  597. * usage count by balancing get_device with put_device. No refcounting is
  598. * managed by driver for that purpose.
  599. *
  600. * Return: 0 if all went fine, else return appropriate error.
  601. */
  602. static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
  603. {
  604. return ti_sci_set_device_state(handle, id, 0,
  605. MSG_DEVICE_SW_STATE_AUTO_OFF);
  606. }
  607. static
  608. int ti_sci_cmd_release_exclusive_devices(const struct ti_sci_handle *handle)
  609. {
  610. struct ti_sci_exclusive_dev *dev, *tmp;
  611. struct ti_sci_info *info;
  612. int i, cnt;
  613. info = handle_to_ti_sci_info(handle);
  614. list_for_each_entry_safe(dev, tmp, &info->dev_list, list) {
  615. cnt = dev->count;
  616. debug("%s: id = %d, cnt = %d\n", __func__, dev->id, cnt);
  617. for (i = 0; i < cnt; i++)
  618. ti_sci_cmd_put_device(handle, dev->id);
  619. }
  620. return 0;
  621. }
  622. /**
  623. * ti_sci_cmd_dev_is_valid() - Is the device valid
  624. * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
  625. * @id: Device Identifier
  626. *
  627. * Return: 0 if all went fine and the device ID is valid, else return
  628. * appropriate error.
  629. */
  630. static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
  631. {
  632. u8 unused;
  633. /* check the device state which will also tell us if the ID is valid */
  634. return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
  635. }
  636. /**
  637. * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
  638. * @handle: Pointer to TISCI handle
  639. * @id: Device Identifier
  640. * @count: Pointer to Context Loss counter to populate
  641. *
  642. * Return: 0 if all went fine, else return appropriate error.
  643. */
  644. static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
  645. u32 *count)
  646. {
  647. return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
  648. }
  649. /**
  650. * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
  651. * @handle: Pointer to TISCI handle
  652. * @id: Device Identifier
  653. * @r_state: true if requested to be idle
  654. *
  655. * Return: 0 if all went fine, else return appropriate error.
  656. */
  657. static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
  658. bool *r_state)
  659. {
  660. int ret;
  661. u8 state;
  662. if (!r_state)
  663. return -EINVAL;
  664. ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
  665. if (ret)
  666. return ret;
  667. *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
  668. return 0;
  669. }
  670. /**
  671. * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
  672. * @handle: Pointer to TISCI handle
  673. * @id: Device Identifier
  674. * @r_state: true if requested to be stopped
  675. * @curr_state: true if currently stopped.
  676. *
  677. * Return: 0 if all went fine, else return appropriate error.
  678. */
  679. static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
  680. bool *r_state, bool *curr_state)
  681. {
  682. int ret;
  683. u8 p_state, c_state;
  684. if (!r_state && !curr_state)
  685. return -EINVAL;
  686. ret =
  687. ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
  688. if (ret)
  689. return ret;
  690. if (r_state)
  691. *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
  692. if (curr_state)
  693. *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
  694. return 0;
  695. }
  696. /**
  697. * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
  698. * @handle: Pointer to TISCI handle
  699. * @id: Device Identifier
  700. * @r_state: true if requested to be ON
  701. * @curr_state: true if currently ON and active
  702. *
  703. * Return: 0 if all went fine, else return appropriate error.
  704. */
  705. static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
  706. bool *r_state, bool *curr_state)
  707. {
  708. int ret;
  709. u8 p_state, c_state;
  710. if (!r_state && !curr_state)
  711. return -EINVAL;
  712. ret =
  713. ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
  714. if (ret)
  715. return ret;
  716. if (r_state)
  717. *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
  718. if (curr_state)
  719. *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
  720. return 0;
  721. }
  722. /**
  723. * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
  724. * @handle: Pointer to TISCI handle
  725. * @id: Device Identifier
  726. * @curr_state: true if currently transitioning.
  727. *
  728. * Return: 0 if all went fine, else return appropriate error.
  729. */
  730. static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
  731. bool *curr_state)
  732. {
  733. int ret;
  734. u8 state;
  735. if (!curr_state)
  736. return -EINVAL;
  737. ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
  738. if (ret)
  739. return ret;
  740. *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
  741. return 0;
  742. }
  743. /**
  744. * ti_sci_cmd_set_device_resets() - command to set resets for device managed
  745. * by TISCI
  746. * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
  747. * @id: Device Identifier
  748. * @reset_state: Device specific reset bit field
  749. *
  750. * Return: 0 if all went fine, else return appropriate error.
  751. */
  752. static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
  753. u32 id, u32 reset_state)
  754. {
  755. struct ti_sci_msg_req_set_device_resets req;
  756. struct ti_sci_msg_hdr *resp;
  757. struct ti_sci_info *info;
  758. struct ti_sci_xfer *xfer;
  759. int ret = 0;
  760. if (IS_ERR(handle))
  761. return PTR_ERR(handle);
  762. if (!handle)
  763. return -EINVAL;
  764. info = handle_to_ti_sci_info(handle);
  765. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
  766. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  767. (u32 *)&req, sizeof(req), sizeof(*resp));
  768. if (IS_ERR(xfer)) {
  769. ret = PTR_ERR(xfer);
  770. return ret;
  771. }
  772. req.id = id;
  773. req.resets = reset_state;
  774. ret = ti_sci_do_xfer(info, xfer);
  775. if (ret)
  776. return ret;
  777. return ret;
  778. }
  779. /**
  780. * ti_sci_cmd_get_device_resets() - Get reset state for device managed
  781. * by TISCI
  782. * @handle: Pointer to TISCI handle
  783. * @id: Device Identifier
  784. * @reset_state: Pointer to reset state to populate
  785. *
  786. * Return: 0 if all went fine, else return appropriate error.
  787. */
  788. static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
  789. u32 id, u32 *reset_state)
  790. {
  791. return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
  792. NULL);
  793. }
  794. /**
  795. * ti_sci_set_clock_state() - Set clock state helper
  796. * @handle: pointer to TI SCI handle
  797. * @dev_id: Device identifier this request is for
  798. * @clk_id: Clock identifier for the device for this request.
  799. * Each device has it's own set of clock inputs. This indexes
  800. * which clock input to modify.
  801. * @flags: Header flags as needed
  802. * @state: State to request for the clock.
  803. *
  804. * Return: 0 if all went well, else returns appropriate error value.
  805. */
  806. static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
  807. u32 dev_id, u8 clk_id,
  808. u32 flags, u8 state)
  809. {
  810. struct ti_sci_msg_req_set_clock_state req;
  811. struct ti_sci_msg_hdr *resp;
  812. struct ti_sci_info *info;
  813. struct ti_sci_xfer *xfer;
  814. int ret = 0;
  815. if (IS_ERR(handle))
  816. return PTR_ERR(handle);
  817. if (!handle)
  818. return -EINVAL;
  819. info = handle_to_ti_sci_info(handle);
  820. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
  821. flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  822. (u32 *)&req, sizeof(req), sizeof(*resp));
  823. if (IS_ERR(xfer)) {
  824. ret = PTR_ERR(xfer);
  825. return ret;
  826. }
  827. req.dev_id = dev_id;
  828. req.clk_id = clk_id;
  829. req.request_state = state;
  830. ret = ti_sci_do_xfer(info, xfer);
  831. if (ret)
  832. return ret;
  833. return ret;
  834. }
  835. /**
  836. * ti_sci_cmd_get_clock_state() - Get clock state helper
  837. * @handle: pointer to TI SCI handle
  838. * @dev_id: Device identifier this request is for
  839. * @clk_id: Clock identifier for the device for this request.
  840. * Each device has it's own set of clock inputs. This indexes
  841. * which clock input to modify.
  842. * @programmed_state: State requested for clock to move to
  843. * @current_state: State that the clock is currently in
  844. *
  845. * Return: 0 if all went well, else returns appropriate error value.
  846. */
  847. static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
  848. u32 dev_id, u8 clk_id,
  849. u8 *programmed_state, u8 *current_state)
  850. {
  851. struct ti_sci_msg_resp_get_clock_state *resp;
  852. struct ti_sci_msg_req_get_clock_state req;
  853. struct ti_sci_info *info;
  854. struct ti_sci_xfer *xfer;
  855. int ret = 0;
  856. if (IS_ERR(handle))
  857. return PTR_ERR(handle);
  858. if (!handle)
  859. return -EINVAL;
  860. if (!programmed_state && !current_state)
  861. return -EINVAL;
  862. info = handle_to_ti_sci_info(handle);
  863. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
  864. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  865. (u32 *)&req, sizeof(req), sizeof(*resp));
  866. if (IS_ERR(xfer)) {
  867. ret = PTR_ERR(xfer);
  868. return ret;
  869. }
  870. req.dev_id = dev_id;
  871. req.clk_id = clk_id;
  872. ret = ti_sci_do_xfer(info, xfer);
  873. if (ret)
  874. return ret;
  875. resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;
  876. if (programmed_state)
  877. *programmed_state = resp->programmed_state;
  878. if (current_state)
  879. *current_state = resp->current_state;
  880. return ret;
  881. }
  882. /**
  883. * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
  884. * @handle: pointer to TI SCI handle
  885. * @dev_id: Device identifier this request is for
  886. * @clk_id: Clock identifier for the device for this request.
  887. * Each device has it's own set of clock inputs. This indexes
  888. * which clock input to modify.
  889. * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
  890. * @can_change_freq: 'true' if frequency change is desired, else 'false'
  891. * @enable_input_term: 'true' if input termination is desired, else 'false'
  892. *
  893. * Return: 0 if all went well, else returns appropriate error value.
  894. */
  895. static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
  896. u8 clk_id, bool needs_ssc, bool can_change_freq,
  897. bool enable_input_term)
  898. {
  899. u32 flags = 0;
  900. flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
  901. flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
  902. flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
  903. return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
  904. MSG_CLOCK_SW_STATE_REQ);
  905. }
  906. /**
  907. * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
  908. * @handle: pointer to TI SCI handle
  909. * @dev_id: Device identifier this request is for
  910. * @clk_id: Clock identifier for the device for this request.
  911. * Each device has it's own set of clock inputs. This indexes
  912. * which clock input to modify.
  913. *
  914. * NOTE: This clock must have been requested by get_clock previously.
  915. *
  916. * Return: 0 if all went well, else returns appropriate error value.
  917. */
  918. static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
  919. u32 dev_id, u8 clk_id)
  920. {
  921. return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
  922. MSG_CLOCK_SW_STATE_UNREQ);
  923. }
  924. /**
  925. * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
  926. * @handle: pointer to TI SCI handle
  927. * @dev_id: Device identifier this request is for
  928. * @clk_id: Clock identifier for the device for this request.
  929. * Each device has it's own set of clock inputs. This indexes
  930. * which clock input to modify.
  931. *
  932. * NOTE: This clock must have been requested by get_clock previously.
  933. *
  934. * Return: 0 if all went well, else returns appropriate error value.
  935. */
  936. static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
  937. u32 dev_id, u8 clk_id)
  938. {
  939. return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
  940. MSG_CLOCK_SW_STATE_AUTO);
  941. }
  942. /**
  943. * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
  944. * @handle: pointer to TI SCI handle
  945. * @dev_id: Device identifier this request is for
  946. * @clk_id: Clock identifier for the device for this request.
  947. * Each device has it's own set of clock inputs. This indexes
  948. * which clock input to modify.
  949. * @req_state: state indicating if the clock is auto managed
  950. *
  951. * Return: 0 if all went well, else returns appropriate error value.
  952. */
  953. static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
  954. u32 dev_id, u8 clk_id, bool *req_state)
  955. {
  956. u8 state = 0;
  957. int ret;
  958. if (!req_state)
  959. return -EINVAL;
  960. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
  961. if (ret)
  962. return ret;
  963. *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
  964. return 0;
  965. }
  966. /**
  967. * ti_sci_cmd_clk_is_on() - Is the clock ON
  968. * @handle: pointer to TI SCI handle
  969. * @dev_id: Device identifier this request is for
  970. * @clk_id: Clock identifier for the device for this request.
  971. * Each device has it's own set of clock inputs. This indexes
  972. * which clock input to modify.
  973. * @req_state: state indicating if the clock is managed by us and enabled
  974. * @curr_state: state indicating if the clock is ready for operation
  975. *
  976. * Return: 0 if all went well, else returns appropriate error value.
  977. */
  978. static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
  979. u8 clk_id, bool *req_state, bool *curr_state)
  980. {
  981. u8 c_state = 0, r_state = 0;
  982. int ret;
  983. if (!req_state && !curr_state)
  984. return -EINVAL;
  985. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
  986. &r_state, &c_state);
  987. if (ret)
  988. return ret;
  989. if (req_state)
  990. *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
  991. if (curr_state)
  992. *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
  993. return 0;
  994. }
  995. /**
  996. * ti_sci_cmd_clk_is_off() - Is the clock OFF
  997. * @handle: pointer to TI SCI handle
  998. * @dev_id: Device identifier this request is for
  999. * @clk_id: Clock identifier for the device for this request.
  1000. * Each device has it's own set of clock inputs. This indexes
  1001. * which clock input to modify.
  1002. * @req_state: state indicating if the clock is managed by us and disabled
  1003. * @curr_state: state indicating if the clock is NOT ready for operation
  1004. *
  1005. * Return: 0 if all went well, else returns appropriate error value.
  1006. */
  1007. static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
  1008. u8 clk_id, bool *req_state, bool *curr_state)
  1009. {
  1010. u8 c_state = 0, r_state = 0;
  1011. int ret;
  1012. if (!req_state && !curr_state)
  1013. return -EINVAL;
  1014. ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
  1015. &r_state, &c_state);
  1016. if (ret)
  1017. return ret;
  1018. if (req_state)
  1019. *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
  1020. if (curr_state)
  1021. *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
  1022. return 0;
  1023. }
  1024. /**
  1025. * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
  1026. * @handle: pointer to TI SCI handle
  1027. * @dev_id: Device identifier this request is for
  1028. * @clk_id: Clock identifier for the device for this request.
  1029. * Each device has it's own set of clock inputs. This indexes
  1030. * which clock input to modify.
  1031. * @parent_id: Parent clock identifier to set
  1032. *
  1033. * Return: 0 if all went well, else returns appropriate error value.
  1034. */
  1035. static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
  1036. u32 dev_id, u8 clk_id, u8 parent_id)
  1037. {
  1038. struct ti_sci_msg_req_set_clock_parent req;
  1039. struct ti_sci_msg_hdr *resp;
  1040. struct ti_sci_info *info;
  1041. struct ti_sci_xfer *xfer;
  1042. int ret = 0;
  1043. if (IS_ERR(handle))
  1044. return PTR_ERR(handle);
  1045. if (!handle)
  1046. return -EINVAL;
  1047. info = handle_to_ti_sci_info(handle);
  1048. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
  1049. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1050. (u32 *)&req, sizeof(req), sizeof(*resp));
  1051. if (IS_ERR(xfer)) {
  1052. ret = PTR_ERR(xfer);
  1053. return ret;
  1054. }
  1055. req.dev_id = dev_id;
  1056. req.clk_id = clk_id;
  1057. req.parent_id = parent_id;
  1058. ret = ti_sci_do_xfer(info, xfer);
  1059. if (ret)
  1060. return ret;
  1061. return ret;
  1062. }
  1063. /**
  1064. * ti_sci_cmd_clk_get_parent() - Get current parent clock source
  1065. * @handle: pointer to TI SCI handle
  1066. * @dev_id: Device identifier this request is for
  1067. * @clk_id: Clock identifier for the device for this request.
  1068. * Each device has it's own set of clock inputs. This indexes
  1069. * which clock input to modify.
  1070. * @parent_id: Current clock parent
  1071. *
  1072. * Return: 0 if all went well, else returns appropriate error value.
  1073. */
  1074. static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
  1075. u32 dev_id, u8 clk_id, u8 *parent_id)
  1076. {
  1077. struct ti_sci_msg_resp_get_clock_parent *resp;
  1078. struct ti_sci_msg_req_get_clock_parent req;
  1079. struct ti_sci_info *info;
  1080. struct ti_sci_xfer *xfer;
  1081. int ret = 0;
  1082. if (IS_ERR(handle))
  1083. return PTR_ERR(handle);
  1084. if (!handle || !parent_id)
  1085. return -EINVAL;
  1086. info = handle_to_ti_sci_info(handle);
  1087. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
  1088. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1089. (u32 *)&req, sizeof(req), sizeof(*resp));
  1090. if (IS_ERR(xfer)) {
  1091. ret = PTR_ERR(xfer);
  1092. return ret;
  1093. }
  1094. req.dev_id = dev_id;
  1095. req.clk_id = clk_id;
  1096. ret = ti_sci_do_xfer(info, xfer);
  1097. if (ret)
  1098. return ret;
  1099. *parent_id = resp->parent_id;
  1100. return ret;
  1101. }
  1102. /**
  1103. * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
  1104. * @handle: pointer to TI SCI handle
  1105. * @dev_id: Device identifier this request is for
  1106. * @clk_id: Clock identifier for the device for this request.
  1107. * Each device has it's own set of clock inputs. This indexes
  1108. * which clock input to modify.
  1109. * @num_parents: Returns he number of parents to the current clock.
  1110. *
  1111. * Return: 0 if all went well, else returns appropriate error value.
  1112. */
  1113. static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
  1114. u32 dev_id, u8 clk_id,
  1115. u8 *num_parents)
  1116. {
  1117. struct ti_sci_msg_resp_get_clock_num_parents *resp;
  1118. struct ti_sci_msg_req_get_clock_num_parents req;
  1119. struct ti_sci_info *info;
  1120. struct ti_sci_xfer *xfer;
  1121. int ret = 0;
  1122. if (IS_ERR(handle))
  1123. return PTR_ERR(handle);
  1124. if (!handle || !num_parents)
  1125. return -EINVAL;
  1126. info = handle_to_ti_sci_info(handle);
  1127. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
  1128. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1129. (u32 *)&req, sizeof(req), sizeof(*resp));
  1130. if (IS_ERR(xfer)) {
  1131. ret = PTR_ERR(xfer);
  1132. return ret;
  1133. }
  1134. req.dev_id = dev_id;
  1135. req.clk_id = clk_id;
  1136. ret = ti_sci_do_xfer(info, xfer);
  1137. if (ret)
  1138. return ret;
  1139. resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
  1140. xfer->tx_message.buf;
  1141. *num_parents = resp->num_parents;
  1142. return ret;
  1143. }
  1144. /**
  1145. * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
  1146. * @handle: pointer to TI SCI handle
  1147. * @dev_id: Device identifier this request is for
  1148. * @clk_id: Clock identifier for the device for this request.
  1149. * Each device has it's own set of clock inputs. This indexes
  1150. * which clock input to modify.
  1151. * @min_freq: The minimum allowable frequency in Hz. This is the minimum
  1152. * allowable programmed frequency and does not account for clock
  1153. * tolerances and jitter.
  1154. * @target_freq: The target clock frequency in Hz. A frequency will be
  1155. * processed as close to this target frequency as possible.
  1156. * @max_freq: The maximum allowable frequency in Hz. This is the maximum
  1157. * allowable programmed frequency and does not account for clock
  1158. * tolerances and jitter.
  1159. * @match_freq: Frequency match in Hz response.
  1160. *
  1161. * Return: 0 if all went well, else returns appropriate error value.
  1162. */
  1163. static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
  1164. u32 dev_id, u8 clk_id, u64 min_freq,
  1165. u64 target_freq, u64 max_freq,
  1166. u64 *match_freq)
  1167. {
  1168. struct ti_sci_msg_resp_query_clock_freq *resp;
  1169. struct ti_sci_msg_req_query_clock_freq req;
  1170. struct ti_sci_info *info;
  1171. struct ti_sci_xfer *xfer;
  1172. int ret = 0;
  1173. if (IS_ERR(handle))
  1174. return PTR_ERR(handle);
  1175. if (!handle || !match_freq)
  1176. return -EINVAL;
  1177. info = handle_to_ti_sci_info(handle);
  1178. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
  1179. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1180. (u32 *)&req, sizeof(req), sizeof(*resp));
  1181. if (IS_ERR(xfer)) {
  1182. ret = PTR_ERR(xfer);
  1183. return ret;
  1184. }
  1185. req.dev_id = dev_id;
  1186. req.clk_id = clk_id;
  1187. req.min_freq_hz = min_freq;
  1188. req.target_freq_hz = target_freq;
  1189. req.max_freq_hz = max_freq;
  1190. ret = ti_sci_do_xfer(info, xfer);
  1191. if (ret)
  1192. return ret;
  1193. resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
  1194. *match_freq = resp->freq_hz;
  1195. return ret;
  1196. }
  1197. /**
  1198. * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
  1199. * @handle: pointer to TI SCI handle
  1200. * @dev_id: Device identifier this request is for
  1201. * @clk_id: Clock identifier for the device for this request.
  1202. * Each device has it's own set of clock inputs. This indexes
  1203. * which clock input to modify.
  1204. * @min_freq: The minimum allowable frequency in Hz. This is the minimum
  1205. * allowable programmed frequency and does not account for clock
  1206. * tolerances and jitter.
  1207. * @target_freq: The target clock frequency in Hz. A frequency will be
  1208. * processed as close to this target frequency as possible.
  1209. * @max_freq: The maximum allowable frequency in Hz. This is the maximum
  1210. * allowable programmed frequency and does not account for clock
  1211. * tolerances and jitter.
  1212. *
  1213. * Return: 0 if all went well, else returns appropriate error value.
  1214. */
  1215. static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
  1216. u32 dev_id, u8 clk_id, u64 min_freq,
  1217. u64 target_freq, u64 max_freq)
  1218. {
  1219. struct ti_sci_msg_req_set_clock_freq req;
  1220. struct ti_sci_msg_hdr *resp;
  1221. struct ti_sci_info *info;
  1222. struct ti_sci_xfer *xfer;
  1223. int ret = 0;
  1224. if (IS_ERR(handle))
  1225. return PTR_ERR(handle);
  1226. if (!handle)
  1227. return -EINVAL;
  1228. info = handle_to_ti_sci_info(handle);
  1229. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
  1230. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1231. (u32 *)&req, sizeof(req), sizeof(*resp));
  1232. if (IS_ERR(xfer)) {
  1233. ret = PTR_ERR(xfer);
  1234. return ret;
  1235. }
  1236. req.dev_id = dev_id;
  1237. req.clk_id = clk_id;
  1238. req.min_freq_hz = min_freq;
  1239. req.target_freq_hz = target_freq;
  1240. req.max_freq_hz = max_freq;
  1241. ret = ti_sci_do_xfer(info, xfer);
  1242. if (ret)
  1243. return ret;
  1244. return ret;
  1245. }
  1246. /**
  1247. * ti_sci_cmd_clk_get_freq() - Get current frequency
  1248. * @handle: pointer to TI SCI handle
  1249. * @dev_id: Device identifier this request is for
  1250. * @clk_id: Clock identifier for the device for this request.
1251. * Each device has its own set of clock inputs. This indexes
  1252. * which clock input to modify.
1253. * @freq: Current frequency in Hz
  1254. *
  1255. * Return: 0 if all went well, else returns appropriate error value.
  1256. */
  1257. static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
  1258. u32 dev_id, u8 clk_id, u64 *freq)
  1259. {
  1260. struct ti_sci_msg_resp_get_clock_freq *resp;
  1261. struct ti_sci_msg_req_get_clock_freq req;
  1262. struct ti_sci_info *info;
  1263. struct ti_sci_xfer *xfer;
  1264. int ret = 0;
  1265. if (IS_ERR(handle))
  1266. return PTR_ERR(handle);
  1267. if (!handle || !freq)
  1268. return -EINVAL;
  1269. info = handle_to_ti_sci_info(handle);
  1270. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
  1271. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1272. (u32 *)&req, sizeof(req), sizeof(*resp));
  1273. if (IS_ERR(xfer)) {
  1274. ret = PTR_ERR(xfer);
  1275. return ret;
  1276. }
  1277. req.dev_id = dev_id;
  1278. req.clk_id = clk_id;
  1279. ret = ti_sci_do_xfer(info, xfer);
  1280. if (ret)
  1281. return ret;
  1282. resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
  1283. *freq = resp->freq_hz;
  1284. return ret;
  1285. }
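/*
 * Illustrative usage sketch (not part of the driver): programming a rate and
 * then reading back what the firmware actually set. "dev_id" and "clk_id" are
 * assumed to come from the caller; the min/target/max window is an arbitrary
 * example.
 *
 *      u64 now_hz;
 *
 *      if (!handle->ops.clk_ops.set_freq(handle, dev_id, clk_id,
 *                                        24000000ULL, 25000000ULL,
 *                                        26000000ULL) &&
 *          !handle->ops.clk_ops.get_freq(handle, dev_id, clk_id, &now_hz))
 *              printf("clock now runs at %llu Hz\n", now_hz);
 */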
  1286. /**
  1287. * ti_sci_cmd_core_reboot() - Command to request system reset
  1288. * @handle: pointer to TI SCI handle
  1289. *
  1290. * Return: 0 if all went well, else returns appropriate error value.
  1291. */
  1292. static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
  1293. {
  1294. struct ti_sci_msg_req_reboot req;
  1295. struct ti_sci_msg_hdr *resp;
  1296. struct ti_sci_info *info;
  1297. struct ti_sci_xfer *xfer;
  1298. int ret = 0;
  1299. if (IS_ERR(handle))
  1300. return PTR_ERR(handle);
  1301. if (!handle)
  1302. return -EINVAL;
  1303. info = handle_to_ti_sci_info(handle);
  1304. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
  1305. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1306. (u32 *)&req, sizeof(req), sizeof(*resp));
  1307. if (IS_ERR(xfer)) {
  1308. ret = PTR_ERR(xfer);
  1309. return ret;
  1310. }
  1311. req.domain = 0;
  1312. ret = ti_sci_do_xfer(info, xfer);
  1313. if (ret)
  1314. return ret;
  1315. return ret;
  1316. }
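/*
 * Illustrative usage sketch (not part of the driver): a board reset path can
 * simply invoke the core op; "handle" is assumed to be a valid TI SCI handle.
 *
 *      if (handle->ops.core_ops.reboot_device(handle))
 *              printf("TISCI system reset request failed\n");
 */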
  1317. /**
  1318. * ti_sci_get_resource_range - Helper to get a range of resources assigned
  1319. * to a host. Resource is uniquely identified by
  1320. * type and subtype.
  1321. * @handle: Pointer to TISCI handle.
  1322. * @dev_id: TISCI device ID.
  1323. * @subtype: Resource assignment subtype that is being requested
  1324. * from the given device.
  1325. * @s_host: Host processor ID to which the resources are allocated
  1326. * @range_start: Start index of the resource range
  1327. * @range_num: Number of resources in the range
  1328. *
  1329. * Return: 0 if all went fine, else return appropriate error.
  1330. */
  1331. static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
  1332. u32 dev_id, u8 subtype, u8 s_host,
  1333. u16 *range_start, u16 *range_num)
  1334. {
  1335. struct ti_sci_msg_resp_get_resource_range *resp;
  1336. struct ti_sci_msg_req_get_resource_range req;
  1337. struct ti_sci_xfer *xfer;
  1338. struct ti_sci_info *info;
  1339. int ret = 0;
  1340. if (IS_ERR(handle))
  1341. return PTR_ERR(handle);
  1342. if (!handle)
  1343. return -EINVAL;
  1344. info = handle_to_ti_sci_info(handle);
  1345. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
  1346. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1347. (u32 *)&req, sizeof(req), sizeof(*resp));
  1348. if (IS_ERR(xfer)) {
  1349. ret = PTR_ERR(xfer);
  1350. return ret;
  1351. }
  1352. req.secondary_host = s_host;
  1353. req.type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
  1354. req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
  1355. ret = ti_sci_do_xfer(info, xfer);
  1356. if (ret)
  1357. goto fail;
  1358. resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
  1359. if (!resp->range_start && !resp->range_num) {
  1360. ret = -ENODEV;
  1361. } else {
  1362. *range_start = resp->range_start;
  1363. *range_num = resp->range_num;
1364. }
  1365. fail:
  1366. return ret;
  1367. }
  1368. static int __maybe_unused
  1369. ti_sci_cmd_get_resource_range_static(const struct ti_sci_handle *handle,
  1370. u32 dev_id, u8 subtype,
  1371. u16 *range_start, u16 *range_num)
  1372. {
  1373. struct ti_sci_resource_static_data *data;
  1374. int i = 0;
  1375. while (1) {
  1376. data = &rm_static_data[i];
  1377. if (!data->dev_id)
  1378. return -EINVAL;
  1379. if (data->dev_id != dev_id || data->subtype != subtype) {
  1380. i++;
  1381. continue;
  1382. }
  1383. *range_start = data->range_start;
  1384. *range_num = data->range_num;
  1385. return 0;
  1386. }
  1387. return -EINVAL;
  1388. }
  1389. /**
  1390. * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1391. * that is the same as the TI SCI interface host.
  1392. * @handle: Pointer to TISCI handle.
  1393. * @dev_id: TISCI device ID.
  1394. * @subtype: Resource assignment subtype that is being requested
  1395. * from the given device.
  1396. * @range_start: Start index of the resource range
  1397. * @range_num: Number of resources in the range
  1398. *
  1399. * Return: 0 if all went fine, else return appropriate error.
  1400. */
  1401. static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
  1402. u32 dev_id, u8 subtype,
  1403. u16 *range_start, u16 *range_num)
  1404. {
  1405. return ti_sci_get_resource_range(handle, dev_id, subtype,
  1406. TI_SCI_IRQ_SECONDARY_HOST_INVALID,
  1407. range_start, range_num);
  1408. }
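/*
 * Illustrative usage sketch (not part of the driver): querying the resource
 * range assigned to this host; "dev_id" and "subtype" are assumed to be known
 * to the caller.
 *
 *      u16 start, num;
 *
 *      if (!handle->ops.rm_core_ops.get_range(handle, dev_id, subtype,
 *                                             &start, &num))
 *              printf("resources %u..%u assigned to us\n",
 *                     start, start + num - 1);
 */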
  1409. /**
  1410. * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
  1411. * assigned to a specified host.
  1412. * @handle: Pointer to TISCI handle.
  1413. * @dev_id: TISCI device ID.
  1414. * @subtype: Resource assignment subtype that is being requested
  1415. * from the given device.
  1416. * @s_host: Host processor ID to which the resources are allocated
  1417. * @range_start: Start index of the resource range
  1418. * @range_num: Number of resources in the range
  1419. *
  1420. * Return: 0 if all went fine, else return appropriate error.
  1421. */
  1422. static
  1423. int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
  1424. u32 dev_id, u8 subtype, u8 s_host,
  1425. u16 *range_start, u16 *range_num)
  1426. {
  1427. return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
  1428. range_start, range_num);
  1429. }
  1430. /**
  1431. * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
  1432. * @handle: pointer to TI SCI handle
1433. * @msmc_start: MSMC start as returned by tisci
  1434. * @msmc_end: MSMC end as returned by tisci
  1435. *
  1436. * Return: 0 if all went well, else returns appropriate error value.
  1437. */
  1438. static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
  1439. u64 *msmc_start, u64 *msmc_end)
  1440. {
  1441. struct ti_sci_msg_resp_query_msmc *resp;
  1442. struct ti_sci_msg_hdr req;
  1443. struct ti_sci_info *info;
  1444. struct ti_sci_xfer *xfer;
  1445. int ret = 0;
  1446. if (IS_ERR(handle))
  1447. return PTR_ERR(handle);
  1448. if (!handle)
  1449. return -EINVAL;
  1450. info = handle_to_ti_sci_info(handle);
  1451. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
  1452. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1453. (u32 *)&req, sizeof(req), sizeof(*resp));
  1454. if (IS_ERR(xfer)) {
  1455. ret = PTR_ERR(xfer);
  1456. return ret;
  1457. }
  1458. ret = ti_sci_do_xfer(info, xfer);
  1459. if (ret)
  1460. return ret;
  1461. resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
  1462. *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
  1463. resp->msmc_start_low;
  1464. *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
  1465. resp->msmc_end_low;
  1466. return ret;
  1467. }
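/*
 * Illustrative usage sketch (not part of the driver): discovering the
 * currently available MSMC window; "handle" is assumed to be a valid TI SCI
 * handle.
 *
 *      u64 msmc_start, msmc_end;
 *
 *      if (!handle->ops.core_ops.query_msmc(handle, &msmc_start, &msmc_end))
 *              printf("MSMC: 0x%llx - 0x%llx\n", msmc_start, msmc_end);
 */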
  1468. /**
  1469. * ti_sci_cmd_proc_request() - Command to request a physical processor control
  1470. * @handle: Pointer to TI SCI handle
  1471. * @proc_id: Processor ID this request is for
  1472. *
  1473. * Return: 0 if all went well, else returns appropriate error value.
  1474. */
  1475. static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
  1476. u8 proc_id)
  1477. {
  1478. struct ti_sci_msg_req_proc_request req;
  1479. struct ti_sci_msg_hdr *resp;
  1480. struct ti_sci_info *info;
  1481. struct ti_sci_xfer *xfer;
  1482. int ret = 0;
  1483. if (IS_ERR(handle))
  1484. return PTR_ERR(handle);
  1485. if (!handle)
  1486. return -EINVAL;
  1487. info = handle_to_ti_sci_info(handle);
  1488. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
  1489. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1490. (u32 *)&req, sizeof(req), sizeof(*resp));
  1491. if (IS_ERR(xfer)) {
  1492. ret = PTR_ERR(xfer);
  1493. return ret;
  1494. }
  1495. req.processor_id = proc_id;
  1496. ret = ti_sci_do_xfer(info, xfer);
  1497. if (ret)
  1498. return ret;
  1499. return ret;
  1500. }
  1501. /**
  1502. * ti_sci_cmd_proc_release() - Command to release a physical processor control
  1503. * @handle: Pointer to TI SCI handle
  1504. * @proc_id: Processor ID this request is for
  1505. *
  1506. * Return: 0 if all went well, else returns appropriate error value.
  1507. */
  1508. static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
  1509. u8 proc_id)
  1510. {
  1511. struct ti_sci_msg_req_proc_release req;
  1512. struct ti_sci_msg_hdr *resp;
  1513. struct ti_sci_info *info;
  1514. struct ti_sci_xfer *xfer;
  1515. int ret = 0;
  1516. if (IS_ERR(handle))
  1517. return PTR_ERR(handle);
  1518. if (!handle)
  1519. return -EINVAL;
  1520. info = handle_to_ti_sci_info(handle);
  1521. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
  1522. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1523. (u32 *)&req, sizeof(req), sizeof(*resp));
  1524. if (IS_ERR(xfer)) {
  1525. ret = PTR_ERR(xfer);
  1526. return ret;
  1527. }
  1528. req.processor_id = proc_id;
  1529. ret = ti_sci_do_xfer(info, xfer);
  1530. if (ret)
  1531. return ret;
  1532. return ret;
  1533. }
  1534. /**
  1535. * ti_sci_cmd_proc_handover() - Command to handover a physical processor
  1536. * control to a host in the processor's access
  1537. * control list.
  1538. * @handle: Pointer to TI SCI handle
  1539. * @proc_id: Processor ID this request is for
  1540. * @host_id: Host ID to get the control of the processor
  1541. *
  1542. * Return: 0 if all went well, else returns appropriate error value.
  1543. */
  1544. static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
  1545. u8 proc_id, u8 host_id)
  1546. {
  1547. struct ti_sci_msg_req_proc_handover req;
  1548. struct ti_sci_msg_hdr *resp;
  1549. struct ti_sci_info *info;
  1550. struct ti_sci_xfer *xfer;
  1551. int ret = 0;
  1552. if (IS_ERR(handle))
  1553. return PTR_ERR(handle);
  1554. if (!handle)
  1555. return -EINVAL;
  1556. info = handle_to_ti_sci_info(handle);
  1557. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
  1558. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1559. (u32 *)&req, sizeof(req), sizeof(*resp));
  1560. if (IS_ERR(xfer)) {
  1561. ret = PTR_ERR(xfer);
  1562. return ret;
  1563. }
  1564. req.processor_id = proc_id;
  1565. req.host_id = host_id;
  1566. ret = ti_sci_do_xfer(info, xfer);
  1567. if (ret)
  1568. return ret;
  1569. return ret;
  1570. }
  1571. /**
  1572. * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
  1573. * configuration flags
  1574. * @handle: Pointer to TI SCI handle
1575. * @proc_id: Processor ID this request is for
 * @bootvector: Processor Boot vector (start address)
  1576. * @config_flags_set: Configuration flags to be set
  1577. * @config_flags_clear: Configuration flags to be cleared.
  1578. *
  1579. * Return: 0 if all went well, else returns appropriate error value.
  1580. */
  1581. static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
  1582. u8 proc_id, u64 bootvector,
  1583. u32 config_flags_set,
  1584. u32 config_flags_clear)
  1585. {
  1586. struct ti_sci_msg_req_set_proc_boot_config req;
  1587. struct ti_sci_msg_hdr *resp;
  1588. struct ti_sci_info *info;
  1589. struct ti_sci_xfer *xfer;
  1590. int ret = 0;
  1591. if (IS_ERR(handle))
  1592. return PTR_ERR(handle);
  1593. if (!handle)
  1594. return -EINVAL;
  1595. info = handle_to_ti_sci_info(handle);
  1596. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
  1597. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1598. (u32 *)&req, sizeof(req), sizeof(*resp));
  1599. if (IS_ERR(xfer)) {
  1600. ret = PTR_ERR(xfer);
  1601. return ret;
  1602. }
  1603. req.processor_id = proc_id;
  1604. req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
  1605. req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
  1606. TISCI_ADDR_HIGH_SHIFT;
  1607. req.config_flags_set = config_flags_set;
  1608. req.config_flags_clear = config_flags_clear;
  1609. ret = ti_sci_do_xfer(info, xfer);
  1610. if (ret)
  1611. return ret;
  1612. return ret;
  1613. }
  1614. /**
  1615. * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
  1616. * control flags
  1617. * @handle: Pointer to TI SCI handle
  1618. * @proc_id: Processor ID this request is for
  1619. * @control_flags_set: Control flags to be set
  1620. * @control_flags_clear: Control flags to be cleared
  1621. *
  1622. * Return: 0 if all went well, else returns appropriate error value.
  1623. */
  1624. static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
  1625. u8 proc_id, u32 control_flags_set,
  1626. u32 control_flags_clear)
  1627. {
  1628. struct ti_sci_msg_req_set_proc_boot_ctrl req;
  1629. struct ti_sci_msg_hdr *resp;
  1630. struct ti_sci_info *info;
  1631. struct ti_sci_xfer *xfer;
  1632. int ret = 0;
  1633. if (IS_ERR(handle))
  1634. return PTR_ERR(handle);
  1635. if (!handle)
  1636. return -EINVAL;
  1637. info = handle_to_ti_sci_info(handle);
  1638. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
  1639. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1640. (u32 *)&req, sizeof(req), sizeof(*resp));
  1641. if (IS_ERR(xfer)) {
  1642. ret = PTR_ERR(xfer);
  1643. return ret;
  1644. }
  1645. req.processor_id = proc_id;
  1646. req.control_flags_set = control_flags_set;
  1647. req.control_flags_clear = control_flags_clear;
  1648. ret = ti_sci_do_xfer(info, xfer);
  1649. if (ret)
  1650. return ret;
  1651. return ret;
  1652. }
  1653. /**
  1654. * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
  1655. * image and then set the processor configuration flags.
  1656. * @handle: Pointer to TI SCI handle
  1657. * @image_addr: Memory address at which payload image and certificate is
1658. * located in memory; this is updated if the image data is
  1659. * moved during authentication.
  1660. * @image_size: This is updated with the final size of the image after
  1661. * authentication.
  1662. *
  1663. * Return: 0 if all went well, else returns appropriate error value.
  1664. */
  1665. static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
  1666. u64 *image_addr, u32 *image_size)
  1667. {
  1668. struct ti_sci_msg_req_proc_auth_boot_image req;
  1669. struct ti_sci_msg_resp_proc_auth_boot_image *resp;
  1670. struct ti_sci_info *info;
  1671. struct ti_sci_xfer *xfer;
  1672. int ret = 0;
  1673. if (IS_ERR(handle))
  1674. return PTR_ERR(handle);
  1675. if (!handle)
  1676. return -EINVAL;
  1677. info = handle_to_ti_sci_info(handle);
  1678. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMAGE,
  1679. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1680. (u32 *)&req, sizeof(req), sizeof(*resp));
  1681. if (IS_ERR(xfer)) {
  1682. ret = PTR_ERR(xfer);
  1683. return ret;
  1684. }
  1685. req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
  1686. req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
  1687. TISCI_ADDR_HIGH_SHIFT;
  1688. ret = ti_sci_do_xfer(info, xfer);
  1689. if (ret)
  1690. return ret;
  1691. resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
  1692. *image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
  1693. (((u64)resp->image_addr_high <<
  1694. TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
  1695. *image_size = resp->image_size;
  1696. return ret;
  1697. }
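/*
 * Illustrative usage sketch (not part of the driver): authenticating a
 * certificate + image blob already loaded to memory. "load_addr" is a
 * hypothetical load address chosen by the caller; the firmware may move the
 * image and shrink the reported size.
 *
 *      u64 image_addr = load_addr;
 *      u32 image_size = 0;
 *
 *      if (!handle->ops.proc_ops.proc_auth_boot_image(handle, &image_addr,
 *                                                     &image_size))
 *              printf("image now at 0x%llx, %u bytes\n", image_addr,
 *                     image_size);
 */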
  1698. /**
  1699. * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
  1700. * @handle: Pointer to TI SCI handle
1701. * @proc_id: Processor ID this request is for
 * @bv: Processor Boot vector (start address)
 * @cfg_flags: Processor specific configuration flags
 * @ctrl_flags: Processor specific control flags
 * @sts_flags: Processor specific status flags
  1702. *
  1703. * Return: 0 if all went well, else returns appropriate error value.
  1704. */
  1705. static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
  1706. u8 proc_id, u64 *bv, u32 *cfg_flags,
  1707. u32 *ctrl_flags, u32 *sts_flags)
  1708. {
  1709. struct ti_sci_msg_resp_get_proc_boot_status *resp;
  1710. struct ti_sci_msg_req_get_proc_boot_status req;
  1711. struct ti_sci_info *info;
  1712. struct ti_sci_xfer *xfer;
  1713. int ret = 0;
  1714. if (IS_ERR(handle))
  1715. return PTR_ERR(handle);
  1716. if (!handle)
  1717. return -EINVAL;
  1718. info = handle_to_ti_sci_info(handle);
  1719. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
  1720. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1721. (u32 *)&req, sizeof(req), sizeof(*resp));
  1722. if (IS_ERR(xfer)) {
  1723. ret = PTR_ERR(xfer);
  1724. return ret;
  1725. }
  1726. req.processor_id = proc_id;
  1727. ret = ti_sci_do_xfer(info, xfer);
  1728. if (ret)
  1729. return ret;
  1730. resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
  1731. xfer->tx_message.buf;
  1732. *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
  1733. (((u64)resp->bootvector_high <<
  1734. TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
  1735. *cfg_flags = resp->config_flags;
  1736. *ctrl_flags = resp->control_flags;
  1737. *sts_flags = resp->status_flags;
  1738. return ret;
  1739. }
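/*
 * Illustrative usage sketch (not part of the driver): reading back the boot
 * state of a remote core; "proc_id" is assumed to come from the caller.
 *
 *      u64 bv;
 *      u32 cfg, ctrl, sts;
 *
 *      if (!handle->ops.proc_ops.get_proc_boot_status(handle, proc_id, &bv,
 *                                                     &cfg, &ctrl, &sts))
 *              printf("core %u: bootvector 0x%llx status 0x%x\n",
 *                     proc_id, bv, sts);
 */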
  1740. /**
  1741. * ti_sci_proc_wait_boot_status_no_wait() - Helper function to wait for a
  1742. * processor boot status without requesting or
  1743. * waiting for a response.
 * @handle: Pointer to TI SCI handle
1744. * @proc_id: Processor ID this request is for
  1745. * @num_wait_iterations: Total number of iterations we will check before
  1746. * we will timeout and give up
1747. * @num_match_iterations: How many consecutive iterations the status must
1748. * keep matching, to account for status bits
1749. * glitching. This is to make sure that the match
1750. * occurs for consecutive checks. This implies that
1751. * the worst case should consider that the stable
1752. * time should at the worst be num_wait_iterations *
1753. * num_match_iterations to prevent timeout.
  1754. * @delay_per_iteration_us: Specifies how long to wait (in micro seconds)
  1755. * between each status checks. This is the minimum
  1756. * duration, and overhead of register reads and
  1757. * checks are on top of this and can vary based on
  1758. * varied conditions.
  1759. * @delay_before_iterations_us: Specifies how long to wait (in micro seconds)
  1760. * before the very first check in the first
  1761. * iteration of status check loop. This is the
  1762. * minimum duration, and overhead of register
1763. * reads and checks are on top of this.
1764. * @status_flags_1_set_all_wait: If non-zero, specifies that all bits of the
1765. * status matching this field requested MUST be 1.
1766. * @status_flags_1_set_any_wait: If non-zero, specifies that at least one of
1767. * the bits matching this field requested MUST be 1.
1768. * @status_flags_1_clr_all_wait: If non-zero, specifies that all bits of the
1769. * status matching this field requested MUST be 0.
1770. * @status_flags_1_clr_any_wait: If non-zero, specifies that at least one of
1771. * the bits matching this field requested MUST be 0.
  1772. *
  1773. * Return: 0 if all goes well, else appropriate error message
  1774. */
  1775. static int
  1776. ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle *handle,
  1777. u8 proc_id,
  1778. u8 num_wait_iterations,
  1779. u8 num_match_iterations,
  1780. u8 delay_per_iteration_us,
  1781. u8 delay_before_iterations_us,
  1782. u32 status_flags_1_set_all_wait,
  1783. u32 status_flags_1_set_any_wait,
  1784. u32 status_flags_1_clr_all_wait,
  1785. u32 status_flags_1_clr_any_wait)
  1786. {
  1787. struct ti_sci_msg_req_wait_proc_boot_status req;
  1788. struct ti_sci_info *info;
  1789. struct ti_sci_xfer *xfer;
  1790. int ret = 0;
  1791. if (IS_ERR(handle))
  1792. return PTR_ERR(handle);
  1793. if (!handle)
  1794. return -EINVAL;
  1795. info = handle_to_ti_sci_info(handle);
  1796. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_WAIT_PROC_BOOT_STATUS,
  1797. TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
  1798. (u32 *)&req, sizeof(req), 0);
  1799. if (IS_ERR(xfer)) {
  1800. ret = PTR_ERR(xfer);
  1801. return ret;
  1802. }
  1803. req.processor_id = proc_id;
  1804. req.num_wait_iterations = num_wait_iterations;
  1805. req.num_match_iterations = num_match_iterations;
  1806. req.delay_per_iteration_us = delay_per_iteration_us;
  1807. req.delay_before_iterations_us = delay_before_iterations_us;
  1808. req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
  1809. req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
  1810. req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
  1811. req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
  1812. ret = ti_sci_do_xfer(info, xfer);
  1813. if (ret)
  1814. return ret;
  1815. return ret;
  1816. }
  1817. /**
  1818. * ti_sci_cmd_proc_shutdown_no_wait() - Command to shutdown a core without
  1819. * requesting or waiting for a response. Note that this API call
  1820. * should be followed by placing the respective processor into
  1821. * either WFE or WFI mode.
  1822. * @handle: Pointer to TI SCI handle
  1823. * @proc_id: Processor ID this request is for
  1824. *
  1825. * Return: 0 if all went well, else returns appropriate error value.
  1826. */
  1827. static int ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle *handle,
  1828. u8 proc_id)
  1829. {
  1830. int ret;
  1831. struct ti_sci_info *info;
  1832. if (IS_ERR(handle))
  1833. return PTR_ERR(handle);
  1834. if (!handle)
  1835. return -EINVAL;
  1836. info = handle_to_ti_sci_info(handle);
  1837. /*
  1838. * Send the core boot status wait message waiting for either WFE or
  1839. * WFI without requesting or waiting for a TISCI response with the
  1840. * maximum wait time to give us the best chance to get to the WFE/WFI
  1841. * command that should follow the invocation of this API before the
  1842. * DMSC-internal processing of this command times out. Note that
  1843. * waiting for the R5 WFE/WFI flags will also work on an ARMV8 type
  1844. * core as the related flag bit positions are the same.
  1845. */
  1846. ret = ti_sci_proc_wait_boot_status_no_wait(handle, proc_id,
  1847. U8_MAX, 100, U8_MAX, U8_MAX,
  1848. 0, PROC_BOOT_STATUS_FLAG_R5_WFE | PROC_BOOT_STATUS_FLAG_R5_WFI,
  1849. 0, 0);
  1850. if (ret) {
  1851. dev_err(info->dev, "Sending core %u wait message fail %d\n",
  1852. proc_id, ret);
  1853. return ret;
  1854. }
  1855. /*
  1856. * Release a processor managed by TISCI without requesting or waiting
  1857. * for a response.
  1858. */
  1859. ret = ti_sci_set_device_state_no_wait(handle, proc_id, 0,
  1860. MSG_DEVICE_SW_STATE_AUTO_OFF);
  1861. if (ret)
  1862. dev_err(info->dev, "Sending core %u shutdown message fail %d\n",
  1863. proc_id, ret);
  1864. return ret;
  1865. }
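/*
 * Illustrative usage sketch (not part of the driver): the expected calling
 * sequence is to queue the shutdown for the currently running core and then
 * enter WFE/WFI ourselves. "proc_id" is assumed to identify this core, and
 * the wfe instruction below is only an example for an Arm core.
 *
 *      handle->ops.proc_ops.proc_shutdown_no_wait(handle, proc_id);
 *      asm volatile("wfe");
 */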
  1866. /**
  1867. * ti_sci_cmd_ring_config() - configure RA ring
  1868. * @handle: pointer to TI SCI handle
  1869. * @valid_params: Bitfield defining validity of ring configuration parameters.
  1870. * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
  1871. * @index: Ring index.
  1872. * @addr_lo: The ring base address lo 32 bits
  1873. * @addr_hi: The ring base address hi 32 bits
  1874. * @count: Number of ring elements.
  1875. * @mode: The mode of the ring
  1876. * @size: The ring element size.
  1877. * @order_id: Specifies the ring's bus order ID.
  1878. *
  1879. * Return: 0 if all went well, else returns appropriate error value.
  1880. *
  1881. * See @ti_sci_msg_rm_ring_cfg_req for more info.
  1882. */
  1883. static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
  1884. u32 valid_params, u16 nav_id, u16 index,
  1885. u32 addr_lo, u32 addr_hi, u32 count,
  1886. u8 mode, u8 size, u8 order_id)
  1887. {
  1888. struct ti_sci_msg_rm_ring_cfg_resp *resp;
  1889. struct ti_sci_msg_rm_ring_cfg_req req;
  1890. struct ti_sci_xfer *xfer;
  1891. struct ti_sci_info *info;
  1892. int ret = 0;
  1893. if (IS_ERR(handle))
  1894. return PTR_ERR(handle);
  1895. if (!handle)
  1896. return -EINVAL;
  1897. info = handle_to_ti_sci_info(handle);
  1898. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
  1899. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1900. (u32 *)&req, sizeof(req), sizeof(*resp));
  1901. if (IS_ERR(xfer)) {
  1902. ret = PTR_ERR(xfer);
  1903. return ret;
  1904. }
  1905. req.valid_params = valid_params;
  1906. req.nav_id = nav_id;
  1907. req.index = index;
  1908. req.addr_lo = addr_lo;
  1909. req.addr_hi = addr_hi;
  1910. req.count = count;
  1911. req.mode = mode;
  1912. req.size = size;
  1913. req.order_id = order_id;
  1914. ret = ti_sci_do_xfer(info, xfer);
  1915. if (ret)
  1916. goto fail;
  1917. fail:
  1918. dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
  1919. return ret;
  1920. }
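/*
 * Illustrative usage sketch (not part of the driver): configuring a ring
 * through the exported op. "nav_id", "ring_idx" and "ring_base" are assumed
 * to be known to the caller; "valid" must carry the TISCI valid_params bits
 * for the fields actually being set, and "mode"/"elsize" stand for the TISCI
 * encodings of the ring mode and element size (values not shown here).
 *
 *      ret = handle->ops.rm_ring_ops.config(handle, valid, nav_id, ring_idx,
 *                                           (u32)ring_base,
 *                                           (u32)(ring_base >> 32),
 *                                           256, mode, elsize, 0);
 */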
  1921. static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
  1922. u32 nav_id, u32 src_thread, u32 dst_thread)
  1923. {
  1924. struct ti_sci_msg_hdr *resp;
  1925. struct ti_sci_msg_psil_pair req;
  1926. struct ti_sci_xfer *xfer;
  1927. struct ti_sci_info *info;
  1928. int ret = 0;
  1929. if (IS_ERR(handle))
  1930. return PTR_ERR(handle);
  1931. if (!handle)
  1932. return -EINVAL;
  1933. info = handle_to_ti_sci_info(handle);
  1934. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
  1935. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1936. (u32 *)&req, sizeof(req), sizeof(*resp));
  1937. if (IS_ERR(xfer)) {
  1938. ret = PTR_ERR(xfer);
  1939. return ret;
  1940. }
  1941. req.nav_id = nav_id;
  1942. req.src_thread = src_thread;
  1943. req.dst_thread = dst_thread;
  1944. ret = ti_sci_do_xfer(info, xfer);
  1945. if (ret)
  1946. goto fail;
  1947. fail:
1948. dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%d\n",
  1949. nav_id, src_thread, dst_thread, ret);
  1950. return ret;
  1951. }
  1952. static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
  1953. u32 nav_id, u32 src_thread, u32 dst_thread)
  1954. {
  1955. struct ti_sci_msg_hdr *resp;
  1956. struct ti_sci_msg_psil_unpair req;
  1957. struct ti_sci_xfer *xfer;
  1958. struct ti_sci_info *info;
  1959. int ret = 0;
  1960. if (IS_ERR(handle))
  1961. return PTR_ERR(handle);
  1962. if (!handle)
  1963. return -EINVAL;
  1964. info = handle_to_ti_sci_info(handle);
  1965. xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
  1966. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1967. (u32 *)&req, sizeof(req), sizeof(*resp));
  1968. if (IS_ERR(xfer)) {
  1969. ret = PTR_ERR(xfer);
  1970. return ret;
  1971. }
  1972. req.nav_id = nav_id;
  1973. req.src_thread = src_thread;
  1974. req.dst_thread = dst_thread;
  1975. ret = ti_sci_do_xfer(info, xfer);
  1976. if (ret)
  1977. goto fail;
  1978. fail:
1979. dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%d\n",
  1980. src_thread, dst_thread, ret);
  1981. return ret;
  1982. }
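/*
 * Illustrative usage sketch (not part of the driver): pairing a PSI-L source
 * thread to a destination thread around a transfer and unpairing afterwards;
 * "nav_id", "src_thread" and "dst_thread" are assumed to be known.
 *
 *      if (handle->ops.rm_psil_ops.pair(handle, nav_id, src_thread,
 *                                       dst_thread))
 *              return -EIO;
 *      ... run the transfer ...
 *      handle->ops.rm_psil_ops.unpair(handle, nav_id, src_thread, dst_thread);
 */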
  1983. static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
  1984. const struct ti_sci_handle *handle,
  1985. const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
  1986. {
  1987. struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
  1988. struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
  1989. struct ti_sci_xfer *xfer;
  1990. struct ti_sci_info *info;
  1991. int ret = 0;
  1992. if (IS_ERR(handle))
  1993. return PTR_ERR(handle);
  1994. if (!handle)
  1995. return -EINVAL;
  1996. info = handle_to_ti_sci_info(handle);
  1997. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
  1998. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  1999. (u32 *)&req, sizeof(req), sizeof(*resp));
  2000. if (IS_ERR(xfer)) {
  2001. ret = PTR_ERR(xfer);
  2002. return ret;
  2003. }
  2004. req.valid_params = params->valid_params;
  2005. req.nav_id = params->nav_id;
  2006. req.index = params->index;
  2007. req.tx_pause_on_err = params->tx_pause_on_err;
  2008. req.tx_filt_einfo = params->tx_filt_einfo;
  2009. req.tx_filt_pswords = params->tx_filt_pswords;
  2010. req.tx_atype = params->tx_atype;
  2011. req.tx_chan_type = params->tx_chan_type;
  2012. req.tx_supr_tdpkt = params->tx_supr_tdpkt;
  2013. req.tx_fetch_size = params->tx_fetch_size;
  2014. req.tx_credit_count = params->tx_credit_count;
  2015. req.txcq_qnum = params->txcq_qnum;
  2016. req.tx_priority = params->tx_priority;
  2017. req.tx_qos = params->tx_qos;
  2018. req.tx_orderid = params->tx_orderid;
  2019. req.fdepth = params->fdepth;
  2020. req.tx_sched_priority = params->tx_sched_priority;
  2021. req.tx_burst_size = params->tx_burst_size;
  2022. req.tx_tdtype = params->tx_tdtype;
  2023. req.extended_ch_type = params->extended_ch_type;
  2024. ret = ti_sci_do_xfer(info, xfer);
  2025. if (ret)
  2026. goto fail;
  2027. fail:
2028. dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
  2029. return ret;
  2030. }
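/*
 * Illustrative usage sketch (not part of the driver): a DMA client would fill
 * the parameter structure and hand it to the op. Only a few fields are shown;
 * "valid" stands for the valid_params bits selecting exactly those fields,
 * and the other identifiers are assumed to be provided by the caller.
 *
 *      struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = {
 *              .valid_params = valid,
 *              .nav_id = nav_id,
 *              .index = tx_ch,
 *              .tx_chan_type = chan_type,
 *              .tx_fetch_size = fetch_words,
 *              .txcq_qnum = cq_ring,
 *      };
 *
 *      ret = handle->ops.rm_udmap_ops.tx_ch_cfg(handle, &cfg);
 */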
  2031. static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
  2032. const struct ti_sci_handle *handle,
  2033. const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
  2034. {
  2035. struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
  2036. struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
  2037. struct ti_sci_xfer *xfer;
  2038. struct ti_sci_info *info;
  2039. int ret = 0;
  2040. if (IS_ERR(handle))
  2041. return PTR_ERR(handle);
  2042. if (!handle)
  2043. return -EINVAL;
  2044. info = handle_to_ti_sci_info(handle);
  2045. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
  2046. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2047. (u32 *)&req, sizeof(req), sizeof(*resp));
  2048. if (IS_ERR(xfer)) {
  2049. ret = PTR_ERR(xfer);
  2050. return ret;
  2051. }
  2052. req.valid_params = params->valid_params;
  2053. req.nav_id = params->nav_id;
  2054. req.index = params->index;
  2055. req.rx_fetch_size = params->rx_fetch_size;
  2056. req.rxcq_qnum = params->rxcq_qnum;
  2057. req.rx_priority = params->rx_priority;
  2058. req.rx_qos = params->rx_qos;
  2059. req.rx_orderid = params->rx_orderid;
  2060. req.rx_sched_priority = params->rx_sched_priority;
  2061. req.flowid_start = params->flowid_start;
  2062. req.flowid_cnt = params->flowid_cnt;
  2063. req.rx_pause_on_err = params->rx_pause_on_err;
  2064. req.rx_atype = params->rx_atype;
  2065. req.rx_chan_type = params->rx_chan_type;
  2066. req.rx_ignore_short = params->rx_ignore_short;
  2067. req.rx_ignore_long = params->rx_ignore_long;
  2068. ret = ti_sci_do_xfer(info, xfer);
  2069. if (ret)
  2070. goto fail;
  2071. fail:
  2072. dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
  2073. return ret;
  2074. }
  2075. static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
  2076. const struct ti_sci_handle *handle,
  2077. const struct ti_sci_msg_rm_udmap_flow_cfg *params)
  2078. {
  2079. struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
  2080. struct ti_sci_msg_rm_udmap_flow_cfg_req req;
  2081. struct ti_sci_xfer *xfer;
  2082. struct ti_sci_info *info;
  2083. int ret = 0;
  2084. if (IS_ERR(handle))
  2085. return PTR_ERR(handle);
  2086. if (!handle)
  2087. return -EINVAL;
  2088. info = handle_to_ti_sci_info(handle);
  2089. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
  2090. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2091. (u32 *)&req, sizeof(req), sizeof(*resp));
  2092. if (IS_ERR(xfer)) {
  2093. ret = PTR_ERR(xfer);
  2094. return ret;
  2095. }
  2096. req.valid_params = params->valid_params;
  2097. req.nav_id = params->nav_id;
  2098. req.flow_index = params->flow_index;
  2099. req.rx_einfo_present = params->rx_einfo_present;
  2100. req.rx_psinfo_present = params->rx_psinfo_present;
  2101. req.rx_error_handling = params->rx_error_handling;
  2102. req.rx_desc_type = params->rx_desc_type;
  2103. req.rx_sop_offset = params->rx_sop_offset;
  2104. req.rx_dest_qnum = params->rx_dest_qnum;
  2105. req.rx_src_tag_hi = params->rx_src_tag_hi;
  2106. req.rx_src_tag_lo = params->rx_src_tag_lo;
  2107. req.rx_dest_tag_hi = params->rx_dest_tag_hi;
  2108. req.rx_dest_tag_lo = params->rx_dest_tag_lo;
  2109. req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
  2110. req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
  2111. req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
  2112. req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
  2113. req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
  2114. req.rx_fdq1_qnum = params->rx_fdq1_qnum;
  2115. req.rx_fdq2_qnum = params->rx_fdq2_qnum;
  2116. req.rx_fdq3_qnum = params->rx_fdq3_qnum;
  2117. req.rx_ps_location = params->rx_ps_location;
  2118. ret = ti_sci_do_xfer(info, xfer);
  2119. if (ret)
  2120. goto fail;
  2121. fail:
  2122. dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
  2123. return ret;
  2124. }
  2125. /**
  2126. * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
  2127. * @handle: pointer to TI SCI handle
  2128. * @region: region configuration parameters
  2129. *
  2130. * Return: 0 if all went well, else returns appropriate error value.
  2131. */
  2132. static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
  2133. const struct ti_sci_msg_fwl_region *region)
  2134. {
  2135. struct ti_sci_msg_fwl_set_firewall_region_req req;
  2136. struct ti_sci_msg_hdr *resp;
  2137. struct ti_sci_info *info;
  2138. struct ti_sci_xfer *xfer;
  2139. int ret = 0;
  2140. if (IS_ERR(handle))
  2141. return PTR_ERR(handle);
  2142. if (!handle)
  2143. return -EINVAL;
  2144. info = handle_to_ti_sci_info(handle);
  2145. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
  2146. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2147. (u32 *)&req, sizeof(req), sizeof(*resp));
  2148. if (IS_ERR(xfer)) {
  2149. ret = PTR_ERR(xfer);
  2150. return ret;
  2151. }
  2152. req.fwl_id = region->fwl_id;
  2153. req.region = region->region;
  2154. req.n_permission_regs = region->n_permission_regs;
  2155. req.control = region->control;
  2156. req.permissions[0] = region->permissions[0];
  2157. req.permissions[1] = region->permissions[1];
  2158. req.permissions[2] = region->permissions[2];
  2159. req.start_address = region->start_address;
  2160. req.end_address = region->end_address;
  2161. ret = ti_sci_do_xfer(info, xfer);
  2162. if (ret)
  2163. return ret;
  2164. return 0;
  2165. }
  2166. /**
  2167. * ti_sci_cmd_get_fwl_region() - Request for getting a firewall region
  2168. * @handle: pointer to TI SCI handle
  2169. * @region: region configuration parameters
  2170. *
  2171. * Return: 0 if all went well, else returns appropriate error value.
  2172. */
  2173. static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
  2174. struct ti_sci_msg_fwl_region *region)
  2175. {
  2176. struct ti_sci_msg_fwl_get_firewall_region_req req;
  2177. struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
  2178. struct ti_sci_info *info;
  2179. struct ti_sci_xfer *xfer;
  2180. int ret = 0;
  2181. if (IS_ERR(handle))
  2182. return PTR_ERR(handle);
  2183. if (!handle)
  2184. return -EINVAL;
  2185. info = handle_to_ti_sci_info(handle);
  2186. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
  2187. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2188. (u32 *)&req, sizeof(req), sizeof(*resp));
  2189. if (IS_ERR(xfer)) {
  2190. ret = PTR_ERR(xfer);
  2191. return ret;
  2192. }
  2193. req.fwl_id = region->fwl_id;
  2194. req.region = region->region;
  2195. req.n_permission_regs = region->n_permission_regs;
  2196. ret = ti_sci_do_xfer(info, xfer);
  2197. if (ret)
  2198. return ret;
  2199. resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;
  2200. region->fwl_id = resp->fwl_id;
  2201. region->region = resp->region;
  2202. region->n_permission_regs = resp->n_permission_regs;
  2203. region->control = resp->control;
  2204. region->permissions[0] = resp->permissions[0];
  2205. region->permissions[1] = resp->permissions[1];
  2206. region->permissions[2] = resp->permissions[2];
  2207. region->start_address = resp->start_address;
  2208. region->end_address = resp->end_address;
  2209. return 0;
  2210. }
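/*
 * Illustrative usage sketch (not part of the driver): reading back a firewall
 * region. The caller fills in the identifying fields and the op overwrites
 * the structure with what the firmware reports; "fwl" and "reg" are assumed
 * example identifiers.
 *
 *      struct ti_sci_msg_fwl_region region = {
 *              .fwl_id = fwl,
 *              .region = reg,
 *              .n_permission_regs = 3,
 *      };
 *
 *      ret = handle->ops.fwl_ops.get_fwl_region(handle, &region);
 *      if (!ret)
 *              printf("fwl %u region %u control 0x%x\n",
 *                     region.fwl_id, region.region, region.control);
 */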
  2211. /**
  2212. * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
  2213. * @handle: pointer to TI SCI handle
  2214. * @region: region configuration parameters
  2215. *
  2216. * Return: 0 if all went well, else returns appropriate error value.
  2217. */
  2218. static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
  2219. struct ti_sci_msg_fwl_owner *owner)
  2220. {
  2221. struct ti_sci_msg_fwl_change_owner_info_req req;
  2222. struct ti_sci_msg_fwl_change_owner_info_resp *resp;
  2223. struct ti_sci_info *info;
  2224. struct ti_sci_xfer *xfer;
  2225. int ret = 0;
  2226. if (IS_ERR(handle))
  2227. return PTR_ERR(handle);
  2228. if (!handle)
  2229. return -EINVAL;
  2230. info = handle_to_ti_sci_info(handle);
  2231. xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
  2232. TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
  2233. (u32 *)&req, sizeof(req), sizeof(*resp));
  2234. if (IS_ERR(xfer)) {
  2235. ret = PTR_ERR(xfer);
  2236. return ret;
  2237. }
  2238. req.fwl_id = owner->fwl_id;
  2239. req.region = owner->region;
  2240. req.owner_index = owner->owner_index;
  2241. ret = ti_sci_do_xfer(info, xfer);
  2242. if (ret)
  2243. return ret;
  2244. resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;
  2245. owner->fwl_id = resp->fwl_id;
  2246. owner->region = resp->region;
  2247. owner->owner_index = resp->owner_index;
  2248. owner->owner_privid = resp->owner_privid;
  2249. owner->owner_permission_bits = resp->owner_permission_bits;
  2250. return ret;
  2251. }
  2252. /*
  2253. * ti_sci_setup_ops() - Setup the operations structures
2254. * @info: pointer to TISCI info structure
  2255. */
  2256. static void ti_sci_setup_ops(struct ti_sci_info *info)
  2257. {
  2258. struct ti_sci_ops *ops = &info->handle.ops;
  2259. struct ti_sci_board_ops *bops = &ops->board_ops;
  2260. struct ti_sci_dev_ops *dops = &ops->dev_ops;
  2261. struct ti_sci_clk_ops *cops = &ops->clk_ops;
  2262. struct ti_sci_core_ops *core_ops = &ops->core_ops;
  2263. struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
  2264. struct ti_sci_proc_ops *pops = &ops->proc_ops;
  2265. struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
  2266. struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
  2267. struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
  2268. struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;
  2269. bops->board_config = ti_sci_cmd_set_board_config;
  2270. bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
  2271. bops->board_config_security = ti_sci_cmd_set_board_config_security;
  2272. bops->board_config_pm = ti_sci_cmd_set_board_config_pm;
  2273. dops->get_device = ti_sci_cmd_get_device;
  2274. dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
  2275. dops->idle_device = ti_sci_cmd_idle_device;
  2276. dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
  2277. dops->put_device = ti_sci_cmd_put_device;
  2278. dops->is_valid = ti_sci_cmd_dev_is_valid;
  2279. dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
  2280. dops->is_idle = ti_sci_cmd_dev_is_idle;
  2281. dops->is_stop = ti_sci_cmd_dev_is_stop;
  2282. dops->is_on = ti_sci_cmd_dev_is_on;
  2283. dops->is_transitioning = ti_sci_cmd_dev_is_trans;
  2284. dops->set_device_resets = ti_sci_cmd_set_device_resets;
  2285. dops->get_device_resets = ti_sci_cmd_get_device_resets;
  2286. dops->release_exclusive_devices = ti_sci_cmd_release_exclusive_devices;
  2287. cops->get_clock = ti_sci_cmd_get_clock;
  2288. cops->idle_clock = ti_sci_cmd_idle_clock;
  2289. cops->put_clock = ti_sci_cmd_put_clock;
  2290. cops->is_auto = ti_sci_cmd_clk_is_auto;
  2291. cops->is_on = ti_sci_cmd_clk_is_on;
  2292. cops->is_off = ti_sci_cmd_clk_is_off;
  2293. cops->set_parent = ti_sci_cmd_clk_set_parent;
  2294. cops->get_parent = ti_sci_cmd_clk_get_parent;
  2295. cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
  2296. cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
  2297. cops->set_freq = ti_sci_cmd_clk_set_freq;
  2298. cops->get_freq = ti_sci_cmd_clk_get_freq;
  2299. core_ops->reboot_device = ti_sci_cmd_core_reboot;
  2300. core_ops->query_msmc = ti_sci_cmd_query_msmc;
  2301. rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
  2302. rm_core_ops->get_range_from_shost =
  2303. ti_sci_cmd_get_resource_range_from_shost;
  2304. pops->proc_request = ti_sci_cmd_proc_request;
  2305. pops->proc_release = ti_sci_cmd_proc_release;
  2306. pops->proc_handover = ti_sci_cmd_proc_handover;
  2307. pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
  2308. pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
  2309. pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
  2310. pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
  2311. pops->proc_shutdown_no_wait = ti_sci_cmd_proc_shutdown_no_wait;
  2312. rops->config = ti_sci_cmd_ring_config;
  2313. psilops->pair = ti_sci_cmd_rm_psil_pair;
  2314. psilops->unpair = ti_sci_cmd_rm_psil_unpair;
  2315. udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
  2316. udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
  2317. udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
  2318. fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
  2319. fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
  2320. fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
  2321. }
  2322. /**
  2323. * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
  2324. * @dev: Pointer to the SYSFW device
  2325. *
  2326. * Return: pointer to handle if successful, else EINVAL if invalid conditions
  2327. * are encountered.
  2328. */
  2329. const
  2330. struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
  2331. {
  2332. if (!sci_dev)
  2333. return ERR_PTR(-EINVAL);
  2334. struct ti_sci_info *info = dev_get_priv(sci_dev);
  2335. if (!info)
  2336. return ERR_PTR(-EINVAL);
  2337. struct ti_sci_handle *handle = &info->handle;
  2338. if (!handle)
  2339. return ERR_PTR(-EINVAL);
  2340. return handle;
  2341. }
  2342. /**
  2343. * ti_sci_get_handle() - Get the TI SCI handle for a device
  2344. * @dev: Pointer to device for which we want SCI handle
  2345. *
  2346. * Return: pointer to handle if successful, else EINVAL if invalid conditions
  2347. * are encountered.
  2348. */
  2349. const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
  2350. {
  2351. if (!dev)
  2352. return ERR_PTR(-EINVAL);
  2353. struct udevice *sci_dev = dev_get_parent(dev);
  2354. return ti_sci_get_handle_from_sysfw(sci_dev);
  2355. }
  2356. /**
  2357. * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
  2358. * @dev: device node
  2359. * @propname: property name containing phandle on TISCI node
  2360. *
  2361. * Return: pointer to handle if successful, else appropriate error value.
  2362. */
  2363. const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
  2364. const char *property)
  2365. {
  2366. struct ti_sci_info *entry, *info = NULL;
  2367. u32 phandle, err;
  2368. ofnode node;
  2369. err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
  2370. if (err)
  2371. return ERR_PTR(err);
  2372. node = ofnode_get_by_phandle(phandle);
  2373. if (!ofnode_valid(node))
  2374. return ERR_PTR(-EINVAL);
  2375. list_for_each_entry(entry, &ti_sci_list, list)
  2376. if (ofnode_equal(dev_ofnode(entry->dev), node)) {
  2377. info = entry;
  2378. break;
  2379. }
  2380. if (!info)
  2381. return ERR_PTR(-ENODEV);
  2382. return &info->handle;
  2383. }
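/*
 * Illustrative usage sketch (not part of the driver): a client device whose
 * node carries a phandle to the system controller (the property name
 * "ti,sci" below is only an example) can look up its handle like this:
 *
 *      const struct ti_sci_handle *sci = ti_sci_get_by_phandle(dev, "ti,sci");
 *
 *      if (IS_ERR(sci))
 *              return PTR_ERR(sci);
 */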
  2384. /**
  2385. * ti_sci_of_to_info() - generate private data from device tree
  2386. * @dev: corresponding system controller interface device
  2387. * @info: pointer to driver specific private data
  2388. *
  2389. * Return: 0 if all goes good, else appropriate error message.
  2390. */
  2391. static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
  2392. {
  2393. int ret;
  2394. ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
  2395. if (ret) {
  2396. dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
  2397. __func__, ret);
  2398. return ret;
  2399. }
  2400. ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
  2401. if (ret) {
  2402. dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
  2403. __func__, ret);
  2404. return ret;
  2405. }
  2406. /* Notify channel is optional. Enable only if populated */
  2407. ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
  2408. if (ret) {
  2409. dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
  2410. __func__, ret);
  2411. }
  2412. info->host_id = dev_read_u32_default(dev, "ti,host-id",
  2413. info->desc->default_host_id);
  2414. info->is_secure = dev_read_bool(dev, "ti,secure-host");
  2415. return 0;
  2416. }
  2417. /**
  2418. * ti_sci_probe() - Basic probe
  2419. * @dev: corresponding system controller interface device
  2420. *
  2421. * Return: 0 if all goes good, else appropriate error message.
  2422. */
  2423. static int ti_sci_probe(struct udevice *dev)
  2424. {
  2425. struct ti_sci_info *info;
  2426. int ret;
  2427. debug("%s(dev=%p)\n", __func__, dev);
  2428. info = dev_get_priv(dev);
  2429. info->desc = (void *)dev_get_driver_data(dev);
  2430. ret = ti_sci_of_to_info(dev, info);
  2431. if (ret) {
  2432. dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
  2433. return ret;
  2434. }
  2435. info->dev = dev;
  2436. info->seq = 0xA;
  2437. list_add_tail(&info->list, &ti_sci_list);
  2438. ti_sci_setup_ops(info);
  2439. ret = ti_sci_cmd_get_revision(&info->handle);
  2440. INIT_LIST_HEAD(&info->dev_list);
  2441. return ret;
  2442. }
  2443. /**
  2444. * ti_sci_dm_probe() - Basic probe for DM to TIFS SCI
  2445. * @dev: corresponding system controller interface device
  2446. *
  2447. * Return: 0 if all goes good, else appropriate error message.
  2448. */
  2449. static __maybe_unused int ti_sci_dm_probe(struct udevice *dev)
  2450. {
  2451. struct ti_sci_rm_core_ops *rm_core_ops;
  2452. struct ti_sci_rm_udmap_ops *udmap_ops;
  2453. struct ti_sci_rm_ringacc_ops *rops;
  2454. struct ti_sci_rm_psil_ops *psilops;
  2455. struct ti_sci_ops *ops;
  2456. struct ti_sci_info *info;
  2457. int ret;
  2458. debug("%s(dev=%p)\n", __func__, dev);
  2459. info = dev_get_priv(dev);
  2460. info->desc = (void *)dev_get_driver_data(dev);
  2461. ret = ti_sci_of_to_info(dev, info);
  2462. if (ret) {
  2463. dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
  2464. return ret;
  2465. }
  2466. info->dev = dev;
  2467. info->seq = 0xA;
  2468. list_add_tail(&info->list, &ti_sci_list);
  2469. ops = &info->handle.ops;
  2470. rm_core_ops = &ops->rm_core_ops;
  2471. rm_core_ops->get_range = ti_sci_cmd_get_resource_range_static;
  2472. rops = &ops->rm_ring_ops;
  2473. rops->config = ti_sci_cmd_ring_config;
  2474. psilops = &ops->rm_psil_ops;
  2475. psilops->pair = ti_sci_cmd_rm_psil_pair;
  2476. psilops->unpair = ti_sci_cmd_rm_psil_unpair;
  2477. udmap_ops = &ops->rm_udmap_ops;
  2478. udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
  2479. udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
  2480. udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
  2481. return ret;
  2482. }
  2483. /*
  2484. * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
  2485. * @res: Pointer to the TISCI resource
  2486. *
  2487. * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
  2488. */
  2489. u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
  2490. {
  2491. u16 set, free_bit;
  2492. for (set = 0; set < res->sets; set++) {
  2493. free_bit = find_first_zero_bit(res->desc[set].res_map,
  2494. res->desc[set].num);
  2495. if (free_bit != res->desc[set].num) {
  2496. set_bit(free_bit, res->desc[set].res_map);
  2497. return res->desc[set].start + free_bit;
  2498. }
  2499. }
  2500. return TI_SCI_RESOURCE_NULL;
  2501. }
  2502. /**
  2503. * ti_sci_release_resource() - Release a resource from TISCI resource.
2504. * @res: Pointer to the TISCI resource
 * @id: Resource id to be released.
  2505. */
  2506. void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
  2507. {
  2508. u16 set;
  2509. for (set = 0; set < res->sets; set++) {
  2510. if (res->desc[set].start <= id &&
  2511. (res->desc[set].num + res->desc[set].start) > id)
  2512. clear_bit(id - res->desc[set].start,
  2513. res->desc[set].res_map);
  2514. }
  2515. }
  2516. /**
  2517. * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
  2518. * @handle: TISCI handle
2519. * @dev: Device pointer to which the resource is assigned
 * @dev_id: TISCI device ID for which the resources are requested
2520. * @of_prop: property name by which the resources are represented
  2521. *
  2522. * Note: This function expects of_prop to be in the form of tuples
  2523. * <type, subtype>. Allocates and initializes ti_sci_resource structure
  2524. * for each of_prop. Client driver can directly call
  2525. * ti_sci_(get_free, release)_resource apis for handling the resource.
  2526. *
  2527. * Return: Pointer to ti_sci_resource if all went well else appropriate
  2528. * error pointer.
  2529. */
  2530. struct ti_sci_resource *
  2531. devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
  2532. struct udevice *dev, u32 dev_id, char *of_prop)
  2533. {
  2534. u32 resource_subtype;
  2535. struct ti_sci_resource *res;
  2536. bool valid_set = false;
  2537. int sets, i, ret;
  2538. u32 *temp;
  2539. res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
  2540. if (!res)
  2541. return ERR_PTR(-ENOMEM);
  2542. sets = dev_read_size(dev, of_prop);
  2543. if (sets < 0) {
  2544. dev_err(dev, "%s resource type ids not available\n", of_prop);
  2545. return ERR_PTR(sets);
  2546. }
  2547. temp = malloc(sets);
  2548. sets /= sizeof(u32);
  2549. res->sets = sets;
  2550. res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
  2551. GFP_KERNEL);
  2552. if (!res->desc)
  2553. return ERR_PTR(-ENOMEM);
  2554. ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
  2555. if (ret)
  2556. return ERR_PTR(-EINVAL);
  2557. for (i = 0; i < res->sets; i++) {
  2558. resource_subtype = temp[i];
  2559. ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
  2560. resource_subtype,
  2561. &res->desc[i].start,
  2562. &res->desc[i].num);
  2563. if (ret) {
  2564. dev_dbg(dev, "type %d subtype %d not allocated for host %d\n",
  2565. dev_id, resource_subtype,
  2566. handle_to_ti_sci_info(handle)->host_id);
  2567. res->desc[i].start = 0;
  2568. res->desc[i].num = 0;
  2569. continue;
  2570. }
  2571. valid_set = true;
  2572. dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
  2573. dev_id, resource_subtype, res->desc[i].start,
  2574. res->desc[i].num);
  2575. res->desc[i].res_map =
  2576. devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
  2577. sizeof(*res->desc[i].res_map), GFP_KERNEL);
  2578. if (!res->desc[i].res_map)
  2579. return ERR_PTR(-ENOMEM);
  2580. }
  2581. if (valid_set)
  2582. return res;
  2583. return ERR_PTR(-EINVAL);
  2584. }
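/*
 * Illustrative usage sketch (not part of the driver): a client driver
 * typically parses its resource ranges once and then allocates and frees
 * individual entries from the bitmap. The property name "ti,sci-rm-range-gp"
 * is only an example; "handle", "dev" and "dev_id" are assumed to be known.
 *
 *      struct ti_sci_resource *res;
 *      u16 id;
 *
 *      res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
 *                                        "ti,sci-rm-range-gp");
 *      if (IS_ERR(res))
 *              return PTR_ERR(res);
 *
 *      id = ti_sci_get_free_resource(res);
 *      if (id == TI_SCI_RESOURCE_NULL)
 *              return -ENOENT;
 *      ...
 *      ti_sci_release_resource(res, id);
 */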
  2585. /* Description for K2G */
  2586. static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
  2587. .default_host_id = 2,
  2588. /* Conservative duration */
  2589. .max_rx_timeout_ms = 10000,
2590. /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
  2591. .max_msgs = 20,
  2592. .max_msg_size = 64,
  2593. };
  2594. /* Description for AM654 */
  2595. static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
  2596. .default_host_id = 12,
  2597. /* Conservative duration */
  2598. .max_rx_timeout_ms = 10000,
2599. /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
  2600. .max_msgs = 20,
  2601. .max_msg_size = 60,
  2602. };
  2603. /* Description for J721e DM to DMSC communication */
  2604. static const struct ti_sci_desc ti_sci_dm_j721e_desc = {
  2605. .default_host_id = 3,
  2606. .max_rx_timeout_ms = 10000,
  2607. .max_msgs = 20,
  2608. .max_msg_size = 60,
  2609. };
  2610. static const struct udevice_id ti_sci_ids[] = {
  2611. {
  2612. .compatible = "ti,k2g-sci",
  2613. .data = (ulong)&ti_sci_pmmc_k2g_desc
  2614. },
  2615. {
  2616. .compatible = "ti,am654-sci",
  2617. .data = (ulong)&ti_sci_pmmc_am654_desc
  2618. },
  2619. { /* Sentinel */ },
  2620. };
  2621. static __maybe_unused const struct udevice_id ti_sci_dm_ids[] = {
  2622. {
  2623. .compatible = "ti,j721e-dm-sci",
  2624. .data = (ulong)&ti_sci_dm_j721e_desc
  2625. },
  2626. { /* Sentinel */ },
  2627. };
  2628. U_BOOT_DRIVER(ti_sci) = {
  2629. .name = "ti_sci",
  2630. .id = UCLASS_FIRMWARE,
  2631. .of_match = ti_sci_ids,
  2632. .probe = ti_sci_probe,
  2633. .priv_auto = sizeof(struct ti_sci_info),
  2634. };
  2635. #if IS_ENABLED(CONFIG_K3_DM_FW)
  2636. U_BOOT_DRIVER(ti_sci_dm) = {
  2637. .name = "ti_sci_dm",
  2638. .id = UCLASS_FIRMWARE,
  2639. .of_match = ti_sci_dm_ids,
  2640. .probe = ti_sci_dm_probe,
  2641. .priv_auto = sizeof(struct ti_sci_info),
  2642. };
  2643. #endif