raw_mode.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * System Control and Management Interface (SCMI) Raw mode support
  4. *
  5. * Copyright (C) 2022 ARM Ltd.
  6. */
  7. /**
  8. * DOC: Theory of operation
  9. *
  10. * When enabled the SCMI Raw mode support exposes a userspace API which allows
  11. * to send and receive SCMI commands, replies and notifications from a user
  12. * application through injection and snooping of bare SCMI messages in binary
  13. * little-endian format.
  14. *
  15. * Such injected SCMI transactions will then be routed through the SCMI core
  16. * stack towards the SCMI backend server using whatever SCMI transport is
  17. * currently configured on the system under test.
  18. *
  19. * It is meant to help in running any sort of SCMI backend server testing, no
  20. * matter where the server is placed, as long as it is normally reachable via
  21. * the transport configured on the system.
  22. *
  23. * It is activated by a Kernel configuration option since it is NOT meant to
  24. * be used in production but only during development and in CI deployments.
  25. *
  26. * In order to avoid possible interferences between the SCMI Raw transactions
  27. * originated from a test-suite and the normal operations of the SCMI drivers,
  28. * when Raw mode is enabled, by default, all the regular SCMI drivers are
  29. * inhibited, unless CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is enabled: in this
  30. * latter case the regular SCMI stack drivers will be loaded as usual and it is
  31. * up to the user of this interface to take care of manually inhibiting the
  32. * regular SCMI drivers in order to avoid interferences during the test runs.
  33. *
  34. * The exposed API is as follows.
  35. *
  36. * All SCMI Raw entries are rooted under a common top /raw debugfs top directory
  37. * which in turn is rooted under the corresponding underlying SCMI instance.
  38. *
  39. * /sys/kernel/debug/scmi/
  40. * `-- 0
  41. * |-- atomic_threshold_us
  42. * |-- instance_name
  43. * |-- raw
  44. * | |-- channels
  45. * | | |-- 0x10
  46. * | | | |-- message
  47. * | | | `-- message_async
  48. * | | `-- 0x13
  49. * | | |-- message
  50. * | | `-- message_async
  51. * | |-- errors
  52. * | |-- message
  53. * | |-- message_async
  54. * | |-- notification
  55. * | `-- reset
  56. * `-- transport
  57. * |-- is_atomic
  58. * |-- max_msg_size
  59. * |-- max_rx_timeout_ms
  60. * |-- rx_max_msg
  61. * |-- tx_max_msg
  62. * `-- type
  63. *
  64. * where:
  65. *
  66. * - errors: used to read back timed-out and unexpected replies
  67. * - message*: used to send sync/async commands and read back immediate and
  68. * delayed responses (if any)
  69. * - notification: used to read any notification being emitted by the system
  70. * (if previously enabled by the user app)
  71. * - reset: used to flush the queues of messages (of any kind) still pending
  72. * to be read; this is useful at test-suite start/stop to get
  73. * rid of any unread messages from the previous run.
  74. *
  75. * with the per-channel entries rooted at /channels being present only on a
  76. * system where multiple transport channels have been configured.
  77. *
  78. * Such per-channel entries can be used to explicitly choose a specific channel
  79. * for SCMI bare message injection, in contrast with the general entries above
  80. * where, instead, the selection of the proper channel to use is automatically
  81. * performed based on the protocol embedded in the injected message and on how the
  82. * transport is configured on the system.
  83. *
  84. * Note that other common general entries are available under transport/ to let
  85. * the user applications properly make up their expectations in terms of
  86. * timeouts and message characteristics.
  87. *
  88. * Each write to the message* entries causes one command request to be built
  89. * and sent while the replies or delayed response are read back from those same
  90. * entries one message at time (receiving an EOF at each message boundary).
  91. *
  92. * The user application running the test is in charge of handling timeouts
  93. * on replies and properly choosing SCMI sequence numbers for the outgoing
  94. * requests (using the same sequence number is supported but discouraged).
  95. *
  96. * Injection of multiple in-flight requests is supported as long as the user
  97. * application uses properly distinct sequence numbers for concurrent requests
  98. * and takes care to properly manage all the related issues about concurrency
  99. * and command/reply pairing. Keep in mind that, anyway, the real level of
  100. * parallelism attainable in such scenario is dependent on the characteristics
  101. * of the underlying transport being used.
  102. *
  103. * Since the SCMI core regular stack is partially used to deliver and collect
  104. * the messages, late replies arrived after timeouts and any other sort of
  105. * unexpected message can be identified by the SCMI core as usual and they will
  106. * be reported as messages under "errors" for later analysis.
  107. */
  108. #include <linux/bitmap.h>
  109. #include <linux/debugfs.h>
  110. #include <linux/delay.h>
  111. #include <linux/device.h>
  112. #include <linux/export.h>
  113. #include <linux/io.h>
  114. #include <linux/kernel.h>
  115. #include <linux/fs.h>
  116. #include <linux/list.h>
  117. #include <linux/module.h>
  118. #include <linux/poll.h>
  119. #include <linux/of.h>
  120. #include <linux/slab.h>
  121. #include <linux/xarray.h>
  122. #include "common.h"
  123. #include "raw_mode.h"
  124. #include <trace/events/scmi.h>
  125. #define SCMI_XFER_RAW_MAX_RETRIES 10
/**
 * struct scmi_raw_queue - Generic Raw queue descriptor
 *
 * @free_bufs: A freelist listhead used to keep unused raw buffers
 * @free_bufs_lock: Spinlock used to protect access to @free_bufs
 * @msg_q: A listhead to a queue of snooped messages waiting to be read out
 * @msg_q_lock: Spinlock used to protect access to @msg_q
 * @wq: A waitqueue used to wait and poll on related @msg_q
 *
 * Buffers move between @free_bufs and @msg_q as messages are snooped and
 * then read out by userspace; both lists are protected by their own
 * IRQ-safe spinlock since producers may run in interrupt context.
 */
struct scmi_raw_queue {
	struct list_head free_bufs;
	/* Protect free_bufs[] lists */
	spinlock_t free_bufs_lock;
	struct list_head msg_q;
	/* Protect msg_q[] lists */
	spinlock_t msg_q_lock;
	wait_queue_head_t wq;
};
/**
 * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
 *
 * @id: Sequential Raw instance ID.
 * @handle: Pointer to SCMI entity handle to use
 * @desc: Pointer to the transport descriptor to use
 * @tx_max_msg: Maximum number of concurrent TX in-flight messages
 * @q: An array of Raw queue descriptors
 * @chans_q: An XArray mapping optional additional per-channel queues
 * @free_waiters: Head of freelist for unused waiters
 * @free_mtx: A mutex to protect the waiters freelist
 * @active_waiters: Head of list for currently active and used waiters
 * @active_mtx: A mutex to protect the active waiters list
 * @waiters_work: A work descriptor to be used with the workqueue machinery
 * @wait_wq: A workqueue reference to the created workqueue
 * @dentry: Top debugfs root dentry for SCMI Raw
 * @gid: A group ID used for devres accounting
 *
 * Note that this descriptor is passed back to the core after SCMI Raw is
 * initialized as an opaque handle to use by subsequent SCMI Raw call hooks.
 *
 */
struct scmi_raw_mode_info {
	unsigned int id;
	const struct scmi_handle *handle;
	const struct scmi_desc *desc;
	int tx_max_msg;
	struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE];
	struct xarray chans_q;
	struct list_head free_waiters;
	/* Protect free_waiters list */
	struct mutex free_mtx;
	struct list_head active_waiters;
	/* Protect active_waiters list */
	struct mutex active_mtx;
	struct work_struct waiters_work;
	struct workqueue_struct *wait_wq;
	struct dentry *dentry;
	void *gid;
};
/**
 * struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for
 *
 * @start_jiffies: The timestamp in jiffies of when this structure was queued.
 * @cinfo: A reference to the channel to use for this transaction
 * @xfer: A reference to the xfer to be waited for
 * @async_response: A completion to be, optionally, used for async waits: it
 *		    will be setup by @scmi_do_xfer_raw_start, if needed, to be
 *		    pointed at by xfer->async_done.
 * @node: A list node.
 *
 * Waiters cycle between the Raw instance free_waiters and active_waiters
 * lists; @start_jiffies lets the deferred worker account for queue aging
 * when computing the residual response timeout.
 */
struct scmi_xfer_raw_waiter {
	unsigned long start_jiffies;
	struct scmi_chan_info *cinfo;
	struct scmi_xfer *xfer;
	struct completion async_response;
	struct list_head node;
};
/**
 * struct scmi_raw_buffer - Structure to hold a full SCMI message
 *
 * @max_len: The maximum allowed message size (header included) that can be
 *	     stored into @msg
 * @msg: A message buffer used to collect a full message grabbed from an xfer.
 * @node: A list node.
 *
 * @msg.len tracks the effective payload size while queued; it is reset back
 * to @max_len whenever the buffer is recycled onto the freelist.
 */
struct scmi_raw_buffer {
	size_t max_len;
	struct scmi_msg msg;
	struct list_head node;
};
/**
 * struct scmi_dbg_raw_data - Structure holding data needed by the debugfs
 * layer
 *
 * @chan_id: The preferred channel to use: if zero the channel is automatically
 *	     selected based on protocol.
 * @raw: A reference to the Raw instance.
 * @tx: A message buffer used to collect TX message on write.
 * @tx_size: The effective size of the TX message.
 * @tx_req_size: The final expected size of the complete TX message.
 * @rx: A message buffer to collect RX message on read.
 * @rx_size: The effective size of the RX message.
 *
 * One such structure is allocated per-open debugfs file instance, so TX/RX
 * staging state is private to each userspace file descriptor.
 */
struct scmi_dbg_raw_data {
	u8 chan_id;
	struct scmi_raw_mode_info *raw;
	struct scmi_msg tx;
	size_t tx_size;
	size_t tx_req_size;
	struct scmi_msg rx;
	size_t rx_size;
};
  237. static struct scmi_raw_queue *
  238. scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx,
  239. unsigned int chan_id)
  240. {
  241. if (!chan_id)
  242. return raw->q[idx];
  243. return xa_load(&raw->chans_q, chan_id);
  244. }
  245. static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
  246. {
  247. unsigned long flags;
  248. struct scmi_raw_buffer *rb = NULL;
  249. struct list_head *head = &q->free_bufs;
  250. spin_lock_irqsave(&q->free_bufs_lock, flags);
  251. if (!list_empty(head)) {
  252. rb = list_first_entry(head, struct scmi_raw_buffer, node);
  253. list_del_init(&rb->node);
  254. }
  255. spin_unlock_irqrestore(&q->free_bufs_lock, flags);
  256. return rb;
  257. }
  258. static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
  259. struct scmi_raw_buffer *rb)
  260. {
  261. unsigned long flags;
  262. /* Reset to full buffer length */
  263. rb->msg.len = rb->max_len;
  264. spin_lock_irqsave(&q->free_bufs_lock, flags);
  265. list_add_tail(&rb->node, &q->free_bufs);
  266. spin_unlock_irqrestore(&q->free_bufs_lock, flags);
  267. }
  268. static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
  269. struct scmi_raw_buffer *rb)
  270. {
  271. unsigned long flags;
  272. spin_lock_irqsave(&q->msg_q_lock, flags);
  273. list_add_tail(&rb->node, &q->msg_q);
  274. spin_unlock_irqrestore(&q->msg_q_lock, flags);
  275. wake_up_interruptible(&q->wq);
  276. }
  277. static struct scmi_raw_buffer*
  278. scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
  279. {
  280. struct scmi_raw_buffer *rb = NULL;
  281. if (!list_empty(&q->msg_q)) {
  282. rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
  283. list_del_init(&rb->node);
  284. }
  285. return rb;
  286. }
  287. static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
  288. {
  289. unsigned long flags;
  290. struct scmi_raw_buffer *rb;
  291. spin_lock_irqsave(&q->msg_q_lock, flags);
  292. rb = scmi_raw_buffer_dequeue_unlocked(q);
  293. spin_unlock_irqrestore(&q->msg_q_lock, flags);
  294. return rb;
  295. }
  296. static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
  297. {
  298. struct scmi_raw_buffer *rb;
  299. do {
  300. rb = scmi_raw_buffer_dequeue(q);
  301. if (rb)
  302. scmi_raw_buffer_put(q, rb);
  303. } while (rb);
  304. }
  305. static struct scmi_xfer_raw_waiter *
  306. scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer,
  307. struct scmi_chan_info *cinfo, bool async)
  308. {
  309. struct scmi_xfer_raw_waiter *rw = NULL;
  310. mutex_lock(&raw->free_mtx);
  311. if (!list_empty(&raw->free_waiters)) {
  312. rw = list_first_entry(&raw->free_waiters,
  313. struct scmi_xfer_raw_waiter, node);
  314. list_del_init(&rw->node);
  315. if (async) {
  316. reinit_completion(&rw->async_response);
  317. xfer->async_done = &rw->async_response;
  318. }
  319. rw->cinfo = cinfo;
  320. rw->xfer = xfer;
  321. }
  322. mutex_unlock(&raw->free_mtx);
  323. return rw;
  324. }
  325. static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw,
  326. struct scmi_xfer_raw_waiter *rw)
  327. {
  328. if (rw->xfer) {
  329. rw->xfer->async_done = NULL;
  330. rw->xfer = NULL;
  331. }
  332. mutex_lock(&raw->free_mtx);
  333. list_add_tail(&rw->node, &raw->free_waiters);
  334. mutex_unlock(&raw->free_mtx);
  335. }
/*
 * Timestamp the waiter, queue it at the tail of the active list (so the list
 * stays ordered by wait-deadline) and kick the deferred worker that will
 * actually wait for the xfer completion.
 */
static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw,
					 struct scmi_xfer_raw_waiter *rw)
{
	/* A timestamp for the deferred worker to know how much this has aged */
	rw->start_jiffies = jiffies;

	trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id,
				      rw->xfer->hdr.protocol_id,
				      rw->xfer->hdr.seq,
				      raw->desc->max_rx_timeout_ms,
				      rw->xfer->hdr.poll_completion);

	mutex_lock(&raw->active_mtx);
	list_add_tail(&rw->node, &raw->active_waiters);
	mutex_unlock(&raw->active_mtx);

	/* kick waiter work */
	queue_work(raw->wait_wq, &raw->waiters_work);
}
  352. static struct scmi_xfer_raw_waiter *
  353. scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw)
  354. {
  355. struct scmi_xfer_raw_waiter *rw = NULL;
  356. mutex_lock(&raw->active_mtx);
  357. if (!list_empty(&raw->active_waiters)) {
  358. rw = list_first_entry(&raw->active_waiters,
  359. struct scmi_xfer_raw_waiter, node);
  360. list_del_init(&rw->node);
  361. }
  362. mutex_unlock(&raw->active_mtx);
  363. return rw;
  364. }
/**
 * scmi_xfer_raw_worker - Work function to wait for Raw xfers completions
 *
 * @work: A reference to the work.
 *
 * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
 * cannot wait to receive its response (if any) in the context of the injection
 * routines so as not to leave the userspace write syscall, which delivered the
 * SCMI message to send, pending till eventually a reply is received.
 * Userspace should and will poll/wait instead on the read syscalls which will
 * be in charge of reading a received reply (if any).
 *
 * Even though reply messages are collected and reported into the SCMI Raw layer
 * on the RX path, nonetheless we have to properly wait for their completion as
 * usual (and async_completion too if needed) in order to properly release the
 * xfer structure at the end: to do this out of the context of the write/send
 * these waiting jobs are delegated to this deferred worker.
 *
 * Any sent xfer, to be waited for, is timestamped and queued for later
 * consumption by this worker: queue aging is accounted for while choosing a
 * timeout for the completion, BUT we do not really care here if we end up
 * accidentally waiting for a bit too long.
 */
static void scmi_xfer_raw_worker(struct work_struct *work)
{
	struct scmi_raw_mode_info *raw;
	struct device *dev;
	unsigned long max_tmo;

	raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
	dev = raw->handle->dev;
	max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms);

	do {
		int ret = 0;
		unsigned int timeout_ms;
		unsigned long aging;
		struct scmi_xfer *xfer;
		struct scmi_xfer_raw_waiter *rw;
		struct scmi_chan_info *cinfo;

		/* No more queued waiters: this work run is done */
		rw = scmi_xfer_raw_waiter_dequeue(raw);
		if (!rw)
			return;

		cinfo = rw->cinfo;
		xfer = rw->xfer;
		/*
		 * Waiters are queued by wait-deadline at the end, so some of
		 * them could have been already expired when processed, BUT we
		 * have to check the completion status anyway just in case a
		 * virtually expired (aged) transaction was indeed completed
		 * fine and we'll have to wait for the asynchronous part (if
		 * any): for this reason a 1 ms timeout is used for already
		 * expired/aged xfers.
		 */
		aging = jiffies - rw->start_jiffies;
		timeout_ms = max_tmo > aging ?
			jiffies_to_msecs(max_tmo - aging) : 1;

		ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer,
							      timeout_ms);
		/* A completed xfer can still carry an SCMI error status */
		if (!ret && xfer->hdr.status)
			ret = scmi_to_linux_errno(xfer->hdr.status);

		if (raw->desc->ops->mark_txdone)
			raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);

		trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
				    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

		/* Wait also for an async delayed response if needed */
		if (!ret && xfer->async_done) {
			unsigned long tmo = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);

			if (!wait_for_completion_timeout(xfer->async_done, tmo))
				dev_err(dev,
					"timed out in RAW delayed resp - HDR:%08X\n",
					pack_scmi_header(&xfer->hdr));
		}

		/* Release waiter and xfer */
		scmi_xfer_raw_put(raw->handle, xfer);
		scmi_xfer_raw_waiter_put(raw, rw);
	} while (1);
}
  441. static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw)
  442. {
  443. int i;
  444. dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n");
  445. for (i = 0; i < SCMI_RAW_MAX_QUEUE; i++)
  446. scmi_raw_buffer_queue_flush(raw->q[i]);
  447. }
/**
 * scmi_xfer_raw_get_init - An helper to build a valid xfer from the provided
 * bare SCMI message.
 *
 * @raw: A reference to the Raw instance.
 * @buf: A buffer containing the whole SCMI message to send (including the
 *	 header) in little-endian binary format.
 * @len: Length of the message in @buf.
 * @p: A pointer to return the initialized Raw xfer.
 *
 * After an xfer is picked from the TX pool and filled in with the message
 * content, the xfer is registered as pending with the core in the usual way
 * using the original sequence number provided by the user with the message.
 *
 * Note that, in case the testing user application is NOT using distinct
 * sequence-numbers between successive SCMI messages such registration could
 * fail temporarily if the previous message, using the same sequence number,
 * had still not released; in such a case we just wait and retry.
 *
 * Return: 0 on Success
 */
static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf,
				  size_t len, struct scmi_xfer **p)
{
	u32 msg_hdr;
	size_t tx_size;
	struct scmi_xfer *xfer;
	int ret, retry = SCMI_XFER_RAW_MAX_RETRIES;
	struct device *dev = raw->handle->dev;

	/* A message must carry at least the 32-bit SCMI header */
	if (!buf || len < sizeof(u32))
		return -EINVAL;

	tx_size = len - sizeof(u32);
	/* Ensure we have sane transfer sizes */
	if (tx_size > raw->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_raw_get(raw->handle);
	if (IS_ERR(xfer)) {
		dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n");
		return PTR_ERR(xfer);
	}

	/* Build xfer from the provided SCMI bare LE message */
	msg_hdr = le32_to_cpu(*((__le32 *)buf));
	unpack_scmi_header(msg_hdr, &xfer->hdr);
	/* Sequence number is taken verbatim from the user-provided header */
	xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr);
	/* Polling not supported */
	xfer->hdr.poll_completion = false;
	xfer->hdr.status = SCMI_SUCCESS;
	xfer->tx.len = tx_size;
	xfer->rx.len = raw->desc->max_msg_size;
	/* Clear the whole TX buffer */
	memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size);
	if (xfer->tx.len)
		memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len);
	*p = xfer;

	/*
	 * In flight registration can temporarily fail in case of Raw messages
	 * if the user injects messages without using monotonically increasing
	 * sequence numbers since, in Raw mode, the xfer (and the token) is
	 * finally released later by a deferred worker. Just retry for a while.
	 */
	do {
		ret = scmi_xfer_raw_inflight_register(raw->handle, xfer);
		if (ret) {
			dev_dbg(dev,
				"...retrying[%d] inflight registration\n",
				retry);
			msleep(raw->desc->max_rx_timeout_ms /
			       SCMI_XFER_RAW_MAX_RETRIES);
		}
	} while (ret && --retry);

	if (ret) {
		dev_warn(dev,
			 "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n",
			 xfer->hdr.seq, msg_hdr);
		/* Give back the unregistered xfer on final failure */
		scmi_xfer_raw_put(raw->handle, xfer);
	}

	return ret;
}
/**
 * scmi_do_xfer_raw_start - An helper to send a valid raw xfer
 *
 * @raw: A reference to the Raw instance.
 * @xfer: The xfer to send
 * @chan_id: The channel ID to use, if zero the channels is automatically
 *	     selected based on the protocol used.
 * @async: A flag stating if an asynchronous command is required.
 *
 * This function sends a previously built raw xfer using an appropriate channel
 * and queues the related waiting work.
 *
 * Note that we need to know explicitly if the required command is meant to be
 * asynchronous in kind since we have to properly setup the waiter.
 * (and deducing this from the payload is weak and do not scale given there is
 * NOT a common header-flag stating if the command is asynchronous or not)
 *
 * Return: 0 on Success
 */
static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
				  struct scmi_xfer *xfer, u8 chan_id,
				  bool async)
{
	int ret;
	struct scmi_chan_info *cinfo;
	struct scmi_xfer_raw_waiter *rw;
	struct device *dev = raw->handle->dev;

	/* Default channel selection is by embedded protocol ID */
	if (!chan_id)
		chan_id = xfer->hdr.protocol_id;
	else
		xfer->flags |= SCMI_XFER_FLAG_CHAN_SET;

	cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id);
	if (IS_ERR(cinfo))
		return PTR_ERR(cinfo);

	rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async);
	if (!rw) {
		dev_warn(dev, "RAW - Cannot get a free waiter !\n");
		return -ENOMEM;
	}

	/* True ONLY if also supported by transport. */
	if (is_polling_enabled(cinfo, raw->desc))
		xfer->hdr.poll_completion = true;

	reinit_completion(&xfer->done);
	/* Make sure xfer state update is visible before sending */
	smp_store_mb(xfer->state, SCMI_XFER_SENT_OK);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = raw->desc->ops->send_message(rw->cinfo, xfer);
	if (ret) {
		dev_err(dev, "Failed to send RAW message %d\n", ret);
		/* Waiter goes back unused on send failure */
		scmi_xfer_raw_waiter_put(raw, rw);
		return ret;
	}

	trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "cmnd", xfer->hdr.seq,
			    xfer->hdr.status,
			    xfer->tx.buf, xfer->tx.len);

	/* From here on the deferred worker owns the completion waiting */
	scmi_xfer_raw_waiter_enqueue(raw, rw);

	return ret;
}
/**
 * scmi_raw_message_send - An helper to build and send an SCMI command using
 * the provided SCMI bare message buffer
 *
 * @raw: A reference to the Raw instance.
 * @buf: A buffer containing the whole SCMI message to send (including the
 *	 header) in little-endian binary format.
 * @len: Length of the message in @buf.
 * @chan_id: The channel ID to use.
 * @async: A flag stating if an asynchronous command is required.
 *
 * On success the xfer ownership passes to the deferred worker, which will
 * release it once the response (and the optional delayed response) has been
 * processed; on failure the xfer is given back here.
 *
 * Return: 0 on Success
 */
static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
				 void *buf, size_t len, u8 chan_id, bool async)
{
	int ret;
	struct scmi_xfer *xfer;

	ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer);
	if (ret)
		return ret;

	ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
	if (ret)
		scmi_xfer_raw_put(raw->handle, xfer);

	return ret;
}
/*
 * Dequeue the next queued message, blocking (interruptibly) until one is
 * available unless @o_nonblock is set.
 *
 * Note the classic lock-drop/wait/relock loop: the spinlock cannot be held
 * across wait_event_interruptible(), so after being woken we re-acquire it
 * and re-check emptiness, since another reader may have raced us to the
 * freshly queued buffer.
 *
 * Return: a buffer, or ERR_PTR(-EAGAIN) / ERR_PTR(-ERESTARTSYS).
 */
static struct scmi_raw_buffer *
scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
{
	unsigned long flags;
	struct scmi_raw_buffer *rb;

	spin_lock_irqsave(&q->msg_q_lock, flags);
	while (list_empty(&q->msg_q)) {
		spin_unlock_irqrestore(&q->msg_q_lock, flags);

		if (o_nonblock)
			return ERR_PTR(-EAGAIN);

		if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q)))
			return ERR_PTR(-ERESTARTSYS);

		spin_lock_irqsave(&q->msg_q_lock, flags);
	}

	rb = scmi_raw_buffer_dequeue_unlocked(q);

	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	return rb;
}
/**
 * scmi_raw_message_receive  - A helper to dequeue and report the next
 * available enqueued raw message payload that has been collected.
 *
 * @raw: A reference to the Raw instance.
 * @buf: A buffer to get hold of the whole SCMI message received and represented
 *	 in little-endian binary format.
 * @len: Length of @buf.
 * @size: The effective size of the message copied into @buf
 * @idx: The index of the queue to pick the next queued message from.
 * @chan_id: The channel ID to use.
 * @o_nonblock: A flag to request a non-blocking message dequeue.
 *
 * Return: 0 on Success
 */
static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw,
                                    void *buf, size_t len, size_t *size,
                                    unsigned int idx, unsigned int chan_id,
                                    bool o_nonblock)
{
        int ret = 0;
        struct scmi_raw_buffer *rb;
        struct scmi_raw_queue *q;

        q = scmi_raw_queue_select(raw, idx, chan_id);
        if (!q)
                return -ENODEV;

        /* May sleep here unless @o_nonblock was requested. */
        rb = scmi_raw_message_dequeue(q, o_nonblock);
        if (IS_ERR(rb)) {
                dev_dbg(raw->handle->dev, "RAW - No message available!\n");
                return PTR_ERR(rb);
        }

        /* Copy out only if the user buffer can hold the whole message. */
        if (rb->msg.len <= len) {
                memcpy(buf, rb->msg.buf, rb->msg.len);
                *size = rb->msg.len;
        } else {
                ret = -ENOSPC;
        }

        /* Hand the buffer back to the queue free list in any case. */
        scmi_raw_buffer_put(q, rb);

        return ret;
}
  671. /* SCMI Raw debugfs helpers */
/*
 * Common debugfs read path shared by the message/notification/errors
 * entries: fetch a whole raw message from the queue selected by @idx
 * into the per-file rx buffer (if not already holding one), then serve
 * it to userspace possibly across multiple read() calls.
 */
static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
                                             char __user *buf,
                                             size_t count, loff_t *ppos,
                                             unsigned int idx)
{
        ssize_t cnt;
        struct scmi_dbg_raw_data *rd = filp->private_data;

        if (!rd->rx_size) {
                int ret;

                ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len,
                                               &rd->rx_size, idx, rd->chan_id,
                                               filp->f_flags & O_NONBLOCK);
                if (ret) {
                        rd->rx_size = 0;
                        return ret;
                }

                /* Reset any previous filepos change, including writes */
                *ppos = 0;
        } else if (*ppos == rd->rx_size) {
                /* Return EOF once all the message has been read-out */
                rd->rx_size = 0;
                return 0;
        }

        cnt = simple_read_from_buffer(buf, count, ppos,
                                      rd->rx.buf, rd->rx_size);

        return cnt;
}
/*
 * Common debugfs write path: accumulate a full raw SCMI message in the
 * per-file tx buffer (possibly across multiple write() calls) and then
 * inject it with a single raw transfer, synchronously or asynchronously
 * depending on @async.
 */
static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
                                              const char __user *buf,
                                              size_t count, loff_t *ppos,
                                              bool async)
{
        int ret;
        struct scmi_dbg_raw_data *rd = filp->private_data;

        if (count > rd->tx.len - rd->tx_size)
                return -ENOSPC;

        /* On first write attempt @count carries the total full message size. */
        if (!rd->tx_size)
                rd->tx_req_size = count;

        /*
         * Gather a full message, possibly across multiple interrupted writes,
         * before sending it with a single RAW xfer.
         */
        if (rd->tx_size < rd->tx_req_size) {
                ssize_t cnt;

                cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos,
                                             buf, count);
                if (cnt < 0)
                        return cnt;

                rd->tx_size += cnt;
                /* Partial copy: wait for the remainder on the next write. */
                if (cnt < count)
                        return cnt;
        }

        ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
                                    rd->chan_id, async);

        /* Reset ppos for next message ... */
        rd->tx_size = 0;
        *ppos = 0;

        return ret ?: count;
}
/*
 * Common poll path: report EPOLLIN | EPOLLRDNORM when the queue selected
 * by @idx holds at least one complete raw message; returns 0 (no events)
 * if no queue exists for the file's channel.
 */
static __poll_t scmi_test_dbg_raw_common_poll(struct file *filp,
                                              struct poll_table_struct *wait,
                                              unsigned int idx)
{
        unsigned long flags;
        struct scmi_dbg_raw_data *rd = filp->private_data;
        struct scmi_raw_queue *q;
        __poll_t mask = 0;

        q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
        if (!q)
                return mask;

        poll_wait(filp, &q->wq, wait);

        /* Check emptiness under the lock to get a consistent snapshot. */
        spin_lock_irqsave(&q->msg_q_lock, flags);
        if (!list_empty(&q->msg_q))
                mask = EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&q->msg_q_lock, flags);

        return mask;
}
  750. static ssize_t scmi_dbg_raw_mode_message_read(struct file *filp,
  751. char __user *buf,
  752. size_t count, loff_t *ppos)
  753. {
  754. return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
  755. SCMI_RAW_REPLY_QUEUE);
  756. }
  757. static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
  758. const char __user *buf,
  759. size_t count, loff_t *ppos)
  760. {
  761. return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
  762. }
  763. static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
  764. struct poll_table_struct *wait)
  765. {
  766. return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE);
  767. }
  768. static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
  769. {
  770. u8 id;
  771. struct scmi_raw_mode_info *raw;
  772. struct scmi_dbg_raw_data *rd;
  773. const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
  774. if (!inode->i_private)
  775. return -ENODEV;
  776. raw = inode->i_private;
  777. rd = kzalloc(sizeof(*rd), GFP_KERNEL);
  778. if (!rd)
  779. return -ENOMEM;
  780. rd->rx.len = raw->desc->max_msg_size + sizeof(u32);
  781. rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL);
  782. if (!rd->rx.buf) {
  783. kfree(rd);
  784. return -ENOMEM;
  785. }
  786. rd->tx.len = raw->desc->max_msg_size + sizeof(u32);
  787. rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL);
  788. if (!rd->tx.buf) {
  789. kfree(rd->rx.buf);
  790. kfree(rd);
  791. return -ENOMEM;
  792. }
  793. /* Grab channel ID from debugfs entry naming if any */
  794. if (!kstrtou8(id_str, 16, &id))
  795. rd->chan_id = id;
  796. rd->raw = raw;
  797. filp->private_data = rd;
  798. return nonseekable_open(inode, filp);
  799. }
  800. static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
  801. {
  802. struct scmi_dbg_raw_data *rd = filp->private_data;
  803. kfree(rd->rx.buf);
  804. kfree(rd->tx.buf);
  805. kfree(rd);
  806. return 0;
  807. }
/*
 * Any write to the 'reset' entry triggers a reset of the Raw xfer state;
 * the written data itself is ignored.
 */
static ssize_t scmi_dbg_raw_mode_reset_write(struct file *filp,
                                             const char __user *buf,
                                             size_t count, loff_t *ppos)
{
        struct scmi_dbg_raw_data *rd = filp->private_data;

        scmi_xfer_raw_reset(rd->raw);

        return count;
}
/* debugfs ops for the write-only 'reset' entry. */
static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
        .open = scmi_dbg_raw_mode_open,
        .release = scmi_dbg_raw_mode_release,
        .write = scmi_dbg_raw_mode_reset_write,
        .owner = THIS_MODULE,
};
/* debugfs ops for the synchronous 'message' injection/readback entry. */
static const struct file_operations scmi_dbg_raw_mode_message_fops = {
        .open = scmi_dbg_raw_mode_open,
        .release = scmi_dbg_raw_mode_release,
        .read = scmi_dbg_raw_mode_message_read,
        .write = scmi_dbg_raw_mode_message_write,
        .poll = scmi_dbg_raw_mode_message_poll,
        .owner = THIS_MODULE,
};
  830. static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
  831. const char __user *buf,
  832. size_t count, loff_t *ppos)
  833. {
  834. return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
  835. }
/* debugfs ops for the 'message_async' entry: async writes, same readback. */
static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
        .open = scmi_dbg_raw_mode_open,
        .release = scmi_dbg_raw_mode_release,
        .read = scmi_dbg_raw_mode_message_read,
        .write = scmi_dbg_raw_mode_message_async_write,
        .poll = scmi_dbg_raw_mode_message_poll,
        .owner = THIS_MODULE,
};
  844. static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
  845. char __user *buf,
  846. size_t count, loff_t *ppos)
  847. {
  848. return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
  849. SCMI_RAW_NOTIF_QUEUE);
  850. }
  851. static __poll_t
  852. scmi_test_dbg_raw_mode_notif_poll(struct file *filp,
  853. struct poll_table_struct *wait)
  854. {
  855. return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE);
  856. }
/* debugfs ops for the read-only 'notification' entry. */
static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
        .open = scmi_dbg_raw_mode_open,
        .release = scmi_dbg_raw_mode_release,
        .read = scmi_test_dbg_raw_mode_notif_read,
        .poll = scmi_test_dbg_raw_mode_notif_poll,
        .owner = THIS_MODULE,
};
  864. static ssize_t scmi_test_dbg_raw_mode_errors_read(struct file *filp,
  865. char __user *buf,
  866. size_t count, loff_t *ppos)
  867. {
  868. return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
  869. SCMI_RAW_ERRS_QUEUE);
  870. }
  871. static __poll_t
  872. scmi_test_dbg_raw_mode_errors_poll(struct file *filp,
  873. struct poll_table_struct *wait)
  874. {
  875. return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE);
  876. }
/* debugfs ops for the read-only 'errors' entry. */
static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
        .open = scmi_dbg_raw_mode_open,
        .release = scmi_dbg_raw_mode_release,
        .read = scmi_test_dbg_raw_mode_errors_read,
        .poll = scmi_test_dbg_raw_mode_errors_poll,
        .owner = THIS_MODULE,
};
/*
 * Allocate and initialize one Raw queue: a pool of tx_max_msg pre-allocated
 * message buffers on a free list, an (initially empty) message list and the
 * waitqueue readers sleep on.
 *
 * All allocations are devres-managed against the SCMI device, so nothing
 * needs explicit freeing on the error paths here.
 *
 * Return: the new queue on success, an ERR_PTR(-ENOMEM) otherwise.
 */
static struct scmi_raw_queue *
scmi_raw_queue_init(struct scmi_raw_mode_info *raw)
{
        int i;
        struct scmi_raw_buffer *rb;
        struct device *dev = raw->handle->dev;
        struct scmi_raw_queue *q;

        q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
        if (!q)
                return ERR_PTR(-ENOMEM);

        rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
        if (!rb)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&q->free_bufs_lock);
        INIT_LIST_HEAD(&q->free_bufs);
        for (i = 0; i < raw->tx_max_msg; i++, rb++) {
                /* Room for a full message plus the 32-bit header. */
                rb->max_len = raw->desc->max_msg_size + sizeof(u32);
                rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL);
                if (!rb->msg.buf)
                        return ERR_PTR(-ENOMEM);
                /* Populate the free list with this buffer. */
                scmi_raw_buffer_put(q, rb);
        }

        spin_lock_init(&q->msg_q_lock);
        INIT_LIST_HEAD(&q->msg_q);
        init_waitqueue_head(&q->wq);

        return q;
}
/*
 * Set up the Raw waiters machinery: a pool of tx_max_msg waiter descriptors
 * on a free list, the active list, and the dedicated workqueue running the
 * worker that reaps completed/timed-out raw transfers.
 *
 * Return: 0 on success, -ENOMEM otherwise. Allocations are devres-managed.
 */
static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
{
        int i;
        struct scmi_xfer_raw_waiter *rw;
        struct device *dev = raw->handle->dev;

        rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL);
        if (!rw)
                return -ENOMEM;

        /* Dedicated unbound, freezable, high-priority workqueue per instance. */
        raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
                                       WQ_UNBOUND | WQ_FREEZABLE |
                                       WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
        if (!raw->wait_wq)
                return -ENOMEM;

        mutex_init(&raw->free_mtx);
        INIT_LIST_HEAD(&raw->free_waiters);
        mutex_init(&raw->active_mtx);
        INIT_LIST_HEAD(&raw->active_waiters);

        for (i = 0; i < raw->tx_max_msg; i++, rw++) {
                init_completion(&rw->async_response);
                /* Populate the free waiters list. */
                scmi_xfer_raw_waiter_put(raw, rw);
        }
        INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker);

        return 0;
}
/*
 * Build the core Raw machinery: the common per-type queues, optional
 * per-channel reply queues (only when multiple channels exist, keyed by
 * channel ID in an xarray) and the waiters worker.
 *
 * All devres allocations are grouped under a dedicated devres group so
 * that a failure here can release everything in one shot.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
                               u8 *channels, int num_chans)
{
        int ret, idx;
        void *gid;
        struct device *dev = raw->handle->dev;

        gid = devres_open_group(dev, NULL, GFP_KERNEL);
        if (!gid)
                return -ENOMEM;

        for (idx = 0; idx < SCMI_RAW_MAX_QUEUE; idx++) {
                raw->q[idx] = scmi_raw_queue_init(raw);
                if (IS_ERR(raw->q[idx])) {
                        ret = PTR_ERR(raw->q[idx]);
                        goto err;
                }
        }

        xa_init(&raw->chans_q);
        if (num_chans > 1) {
                int i;

                for (i = 0; i < num_chans; i++) {
                        struct scmi_raw_queue *q;

                        q = scmi_raw_queue_init(raw);
                        if (IS_ERR(q)) {
                                ret = PTR_ERR(q);
                                goto err_xa;
                        }

                        /* Map channel ID -> dedicated queue. */
                        ret = xa_insert(&raw->chans_q, channels[i], q,
                                        GFP_KERNEL);
                        if (ret) {
                                dev_err(dev,
                                        "Fail to allocate Raw queue 0x%02X\n",
                                        channels[i]);
                                goto err_xa;
                        }
                }
        }

        ret = scmi_xfer_raw_worker_init(raw);
        if (ret)
                goto err_xa;

        /* Keep the group handle around for scmi_raw_mode_cleanup(). */
        devres_close_group(dev, gid);
        raw->gid = gid;

        return 0;

err_xa:
        xa_destroy(&raw->chans_q);
err:
        devres_release_group(dev, gid);
        return ret;
}
/**
 * scmi_raw_mode_init  - Function to initialize the SCMI Raw stack
 *
 * @handle: Pointer to SCMI entity handle
 * @top_dentry: A reference to the top Raw debugfs dentry
 * @instance_id: The ID of the underlying SCMI platform instance represented by
 *		 this Raw instance
 * @channels: The list of the existing channels
 * @num_chans: The number of entries in @channels
 * @desc: Reference to the transport operations
 * @tx_max_msg: Max number of in-flight messages allowed by the transport
 *
 * This function prepares the SCMI Raw stack and creates the debugfs API.
 *
 * Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise
 */
void *scmi_raw_mode_init(const struct scmi_handle *handle,
                         struct dentry *top_dentry, int instance_id,
                         u8 *channels, int num_chans,
                         const struct scmi_desc *desc, int tx_max_msg)
{
        int ret;
        struct scmi_raw_mode_info *raw;
        struct device *dev;

        if (!handle || !desc)
                return ERR_PTR(-EINVAL);

        dev = handle->dev;
        raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL);
        if (!raw)
                return ERR_PTR(-ENOMEM);

        raw->handle = handle;
        raw->desc = desc;
        raw->tx_max_msg = tx_max_msg;
        raw->id = instance_id;

        ret = scmi_raw_mode_setup(raw, channels, num_chans);
        if (ret) {
                devm_kfree(dev, raw);
                return ERR_PTR(ret);
        }

        /* Top-level entries common to all channels. */
        raw->dentry = debugfs_create_dir("raw", top_dentry);

        debugfs_create_file("reset", 0200, raw->dentry, raw,
                            &scmi_dbg_raw_mode_reset_fops);

        debugfs_create_file("message", 0600, raw->dentry, raw,
                            &scmi_dbg_raw_mode_message_fops);

        debugfs_create_file("message_async", 0600, raw->dentry, raw,
                            &scmi_dbg_raw_mode_message_async_fops);

        debugfs_create_file("notification", 0400, raw->dentry, raw,
                            &scmi_dbg_raw_mode_notification_fops);

        debugfs_create_file("errors", 0400, raw->dentry, raw,
                            &scmi_dbg_raw_mode_errors_fops);

        /*
         * Expose per-channel entries if multiple channels available.
         * Just ignore errors while setting up these interfaces since we
         * have anyway already a working core Raw support.
         */
        if (num_chans > 1) {
                int i;
                struct dentry *top_chans;

                top_chans = debugfs_create_dir("channels", raw->dentry);

                for (i = 0; i < num_chans; i++) {
                        char cdir[8];
                        struct dentry *chd;

                        /* Per-channel dir named after the hex channel ID. */
                        snprintf(cdir, 8, "0x%02X", channels[i]);
                        chd = debugfs_create_dir(cdir, top_chans);

                        debugfs_create_file("message", 0600, chd, raw,
                                            &scmi_dbg_raw_mode_message_fops);

                        debugfs_create_file("message_async", 0600, chd, raw,
                                            &scmi_dbg_raw_mode_message_async_fops);
                }
        }

        dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id);

        return raw;
}
/**
 * scmi_raw_mode_cleanup  - Function to cleanup the SCMI Raw stack
 *
 * @r: An opaque handle to an initialized SCMI Raw instance
 */
void scmi_raw_mode_cleanup(void *r)
{
        struct scmi_raw_mode_info *raw = r;

        if (!raw)
                return;

        /* Tear down debugfs first so no new user requests can come in. */
        debugfs_remove_recursive(raw->dentry);

        cancel_work_sync(&raw->waiters_work);
        destroy_workqueue(raw->wait_wq);
        xa_destroy(&raw->chans_q);
}
/*
 * Serialize @xfer into @msg as a little-endian binary blob: 32-bit packed
 * header, then a 32-bit status word (except for notifications, which carry
 * none), then the raw payload.
 *
 * On entry *@msg_len is the capacity of @msg; on success it is updated to
 * the effective serialized size.
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ENOSPC if @msg is too
 * small.
 */
static int scmi_xfer_raw_collect(void *msg, size_t *msg_len,
                                 struct scmi_xfer *xfer)
{
        __le32 *m;
        size_t msg_size;

        if (!xfer || !msg || !msg_len)
                return -EINVAL;

        /* Account for hdr ...*/
        msg_size = xfer->rx.len + sizeof(u32);
        /* ... and status if needed */
        if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
                msg_size += sizeof(u32);

        if (msg_size > *msg_len)
                return -ENOSPC;

        m = msg;
        *m = cpu_to_le32(pack_scmi_header(&xfer->hdr));
        if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
                *++m = cpu_to_le32(xfer->hdr.status);

        memcpy(++m, xfer->rx.buf, xfer->rx.len);

        *msg_len = msg_size;

        return 0;
}
/**
 * scmi_raw_message_report  - Helper to report back valid responses/notifications
 * to raw message requests.
 *
 * @r: An opaque reference to the raw instance configuration
 * @xfer: The xfer containing the message to be reported
 * @idx: The index of the queue.
 * @chan_id: The channel ID to use.
 *
 * If Raw mode is enabled, this is called from the SCMI core on the regular RX
 * path to save and enqueue the response/notification payload carried by this
 * xfer into a dedicated scmi_raw_buffer for later consumption by the user.
 *
 * This way the caller can free the related xfer immediately afterwards and the
 * user can read back the raw message payload at its own pace (if ever) without
 * holding an xfer for too long.
 */
void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
                             unsigned int idx, unsigned int chan_id)
{
        int ret;
        unsigned long flags;
        struct scmi_raw_buffer *rb;
        struct device *dev;
        struct scmi_raw_queue *q;
        struct scmi_raw_mode_info *raw = r;

        /* Only report replies that originated from a Raw-injected command. */
        if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer)))
                return;

        dev = raw->handle->dev;
        /* Fall back to the common queue (chan 0) unless the xfer is chan-bound. */
        q = scmi_raw_queue_select(raw, idx,
                                  SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
        if (!q) {
                dev_warn(dev,
                         "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
                         idx, chan_id);
                return;
        }

        /*
         * Grab the msg_q_lock upfront to avoid a possible race between
         * realizing the free list was empty and effectively picking the next
         * buffer to use from the oldest one enqueued and still unread on this
         * msg_q.
         *
         * Note that nowhere else these locks are taken together, so no risk of
         * deadlocks due to inversion.
         */
        spin_lock_irqsave(&q->msg_q_lock, flags);
        rb = scmi_raw_buffer_get(q);
        if (!rb) {
                /*
                 * Immediate and delayed replies to previously injected Raw
                 * commands MUST be read back from userspace to free the buffers:
                 * if this is not happening something is seriously broken and
                 * must be fixed at the application level: complain loudly.
                 */
                if (idx == SCMI_RAW_REPLY_QUEUE) {
                        spin_unlock_irqrestore(&q->msg_q_lock, flags);
                        dev_warn(dev,
                                 "RAW[%d] - Buffers exhausted. Dropping report.\n",
                                 idx);
                        return;
                }

                /*
                 * Notifications and errors queues are instead handled in a
                 * circular manner: unread old buffers are just overwritten by
                 * newer ones.
                 *
                 * The main reason for this is that notifications originated
                 * by Raw requests cannot be distinguished from normal ones, so
                 * your Raw buffers queues risk to be flooded and depleted by
                 * notifications if you left it mistakenly enabled or when in
                 * coexistence mode.
                 */
                rb = scmi_raw_buffer_dequeue_unlocked(q);
                if (WARN_ON(!rb)) {
                        spin_unlock_irqrestore(&q->msg_q_lock, flags);
                        return;
                }

                /* Reset to full buffer length */
                rb->msg.len = rb->max_len;

                dev_warn_once(dev,
                              "RAW[%d] - Buffers exhausted. Re-using oldest.\n",
                              idx);
        }
        spin_unlock_irqrestore(&q->msg_q_lock, flags);

        /* Serialize the xfer into the buffer and publish it to readers. */
        ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer);
        if (ret) {
                dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n");
                scmi_raw_buffer_put(q, rb);
                return;
        }

        scmi_raw_buffer_enqueue(q, rb);
}
/*
 * Populate a temporary @xfer from the raw @msg_hdr and fetch the related
 * payload from the transport channel; used to capture messages that the
 * regular RX path refused to process.
 */
static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw,
                               struct scmi_chan_info *cinfo,
                               struct scmi_xfer *xfer, u32 msg_hdr)
{
        /* Unpack received HDR as it is */
        unpack_scmi_header(msg_hdr, &xfer->hdr);
        xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);

        memset(xfer->rx.buf, 0x00, xfer->rx.len);

        raw->desc->ops->fetch_response(cinfo, xfer);
}
/**
 * scmi_raw_error_report  - Helper to report back timed-out or generally
 * unexpected replies.
 *
 * @r: An opaque reference to the raw instance configuration
 * @cinfo: A reference to the channel to use to retrieve the broken xfer
 * @msg_hdr: The SCMI message header of the message to fetch and report
 * @priv: Any private data related to the xfer.
 *
 * If Raw mode is enabled, this is called from the SCMI core on the RX path in
 * case of errors to save and enqueue the bad message payload carried by the
 * message that has just been received.
 *
 * Note that we have to manually fetch any available payload into a temporary
 * xfer to be able to save and enqueue the message, since the regular RX error
 * path which had called this would have not fetched the message payload having
 * classified it as an error.
 */
void scmi_raw_error_report(void *r, struct scmi_chan_info *cinfo,
                           u32 msg_hdr, void *priv)
{
        /* Stack-local throwaway xfer: only rx buffer needs allocation. */
        struct scmi_xfer xfer;
        struct scmi_raw_mode_info *raw = r;

        if (!raw)
                return;

        xfer.rx.len = raw->desc->max_msg_size;
        /* GFP_ATOMIC: may be called from the RX interrupt path. */
        xfer.rx.buf = kzalloc(xfer.rx.len, GFP_ATOMIC);
        if (!xfer.rx.buf) {
                dev_info(raw->handle->dev,
                         "Cannot report Raw error for HDR:0x%X - ENOMEM\n",
                         msg_hdr);
                return;
        }

        /* Any transport-provided priv must be passed back down to transport */
        if (priv)
                /* Ensure priv is visible */
                smp_store_mb(xfer.priv, priv);

        scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr);
        scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0);

        kfree(xfer.rx.buf);
}