bus.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2012-2023, Intel Corporation. All rights reserved.
  4. * Intel Management Engine Interface (Intel MEI) Linux driver
  5. */
  6. #include <linux/module.h>
  7. #include <linux/device.h>
  8. #include <linux/kernel.h>
  9. #include <linux/sched/signal.h>
  10. #include <linux/init.h>
  11. #include <linux/errno.h>
  12. #include <linux/slab.h>
  13. #include <linux/mutex.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/scatterlist.h>
  16. #include <linux/mei_cl_bus.h>
  17. #include "mei_dev.h"
  18. #include "client.h"
  19. #define to_mei_cl_driver(d) container_of_const(d, struct mei_cl_driver, driver)
  20. /**
  21. * __mei_cl_send - internal client send (write)
  22. *
  23. * @cl: host client
  24. * @buf: buffer to send
  25. * @length: buffer length
  26. * @vtag: virtual tag
  27. * @mode: sending mode
  28. *
  29. * Return: written size bytes or < 0 on error
  30. */
  31. ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
  32. unsigned int mode)
  33. {
  34. return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
  35. }
/**
 * __mei_cl_send_timeout - internal client send (write)
 *
 * @cl: host client
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: sending mode
 * @timeout: send timeout in milliseconds.
 *           effective only for blocking writes: the MEI_CL_IO_TX_BLOCKING mode bit is set.
 *           set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
 *
 * Return: written size bytes or < 0 on error
 */
ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
			      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	/* sends are allowed while powering down (e.g. for shutdown messages) */
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* Check if we have an ME client device */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (vtag) {
		/* Check if vtag is supported by client */
		rets = mei_cl_vt_support_check(cl);
		if (rets)
			goto out;
	}

	/* a single send must fit into one MTU-sized message */
	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	/*
	 * Throttle: wait until the per-client TX queue drops below the bus
	 * limit. The device lock is dropped across the sleep and retaken
	 * before re-checking the condition.
	 */
	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
		mutex_unlock(&bus->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
						cl->writing_state == MEI_WRITE_COMPLETE ||
						(!mei_cl_is_connected(cl)));
		mutex_lock(&bus->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		/* connection may have dropped while we slept */
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	cb->vtag = vtag;

	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
	memcpy(cb->buf.data, buf, length);
	/* hack we point data to header */
	if (mode & MEI_CL_IO_SGL) {
		cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
		cb->buf.data = NULL;
		cb->buf.size = 0;
	}

	rets = mei_cl_write(cl, cb, timeout);

	/* SGL writes report 0 on success; report the payload length instead */
	if (mode & MEI_CL_IO_SGL && rets == 0)
		rets = length;

out:
	mutex_unlock(&bus->device_lock);

	return rets;
}
/**
 * __mei_cl_recv - internal client receive (read)
 *
 * @cl: host client
 * @buf: buffer to receive
 * @length: buffer length
 * @mode: io mode
 * @vtag: virtual tag
 * @timeout: recv timeout in milliseconds, 0 for infinite timeout
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
		      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	size_t r_length;
	ssize_t rets;
	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	/* a completed read may already be waiting on the queue */
	cb = mei_cl_read_cb(cl, NULL);
	if (cb)
		goto copy;

	rets = mei_cl_read_start(cl, length, NULL);
	/* -EBUSY means a read is already in flight; fall through and wait */
	if (rets && rets != -EBUSY)
		goto out;

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* wait on event only if there is no other waiter */
	/* synchronized under device mutex */
	if (!waitqueue_active(&cl->rx_wait)) {
		mutex_unlock(&bus->device_lock);

		if (timeout) {
			rets = wait_event_interruptible_timeout
					(cl->rx_wait,
					 mei_cl_read_cb(cl, NULL) ||
					 (!mei_cl_is_connected(cl)),
					 msecs_to_jiffies(timeout));
			if (rets == 0)
				return -ETIME;
			if (rets < 0) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		} else {
			if (wait_event_interruptible
					(cl->rx_wait,
					 mei_cl_read_cb(cl, NULL) ||
					 (!mei_cl_is_connected(cl)))) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		}

		mutex_lock(&bus->device_lock);

		/* connection may have dropped while we slept */
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy:
	if (cb->status) {
		rets = cb->status;
		goto free;
	}

	/* for the GSC type - copy the extended header to the buffer */
	if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
		/* ext_hdr->length is in dwords */
		r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
		memcpy(buf, cb->ext_hdr, r_length);
	} else {
		r_length = min_t(size_t, length, cb->buf_idx);
		memcpy(buf, cb->buf.data, r_length);
	}
	rets = r_length;

	if (vtag)
		*vtag = cb->vtag;

free:
	mei_cl_del_rd_completed(cl, cb);
out:
	mutex_unlock(&bus->device_lock);

	return rets;
}
  221. /**
  222. * mei_cldev_send_vtag - me device send with vtag (write)
  223. *
  224. * @cldev: me client device
  225. * @buf: buffer to send
  226. * @length: buffer length
  227. * @vtag: virtual tag
  228. *
  229. * Return:
  230. * * written size in bytes
  231. * * < 0 on error
  232. */
  233. ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
  234. size_t length, u8 vtag)
  235. {
  236. struct mei_cl *cl = cldev->cl;
  237. return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
  238. }
  239. EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
  240. /**
  241. * mei_cldev_send_vtag_timeout - me device send with vtag and timeout (write)
  242. *
  243. * @cldev: me client device
  244. * @buf: buffer to send
  245. * @length: buffer length
  246. * @vtag: virtual tag
  247. * @timeout: send timeout in milliseconds, 0 for infinite timeout
  248. *
  249. * Return:
  250. * * written size in bytes
  251. * * < 0 on error
  252. */
  253. ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
  254. size_t length, u8 vtag, unsigned long timeout)
  255. {
  256. struct mei_cl *cl = cldev->cl;
  257. return __mei_cl_send_timeout(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING, timeout);
  258. }
  259. EXPORT_SYMBOL_GPL(mei_cldev_send_vtag_timeout);
  260. /**
  261. * mei_cldev_recv_vtag - client receive with vtag (read)
  262. *
  263. * @cldev: me client device
  264. * @buf: buffer to receive
  265. * @length: buffer length
  266. * @vtag: virtual tag
  267. *
  268. * Return:
  269. * * read size in bytes
  270. * * < 0 on error
  271. */
  272. ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
  273. u8 *vtag)
  274. {
  275. struct mei_cl *cl = cldev->cl;
  276. return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
  277. }
  278. EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
  279. /**
  280. * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
  281. *
  282. * @cldev: me client device
  283. * @buf: buffer to receive
  284. * @length: buffer length
  285. * @vtag: virtual tag
  286. *
  287. * Return:
  288. * * read size in bytes
  289. * * -EAGAIN if function will block.
  290. * * < 0 on other error
  291. */
  292. ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
  293. size_t length, u8 *vtag)
  294. {
  295. struct mei_cl *cl = cldev->cl;
  296. return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
  297. }
  298. EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
  299. /**
  300. * mei_cldev_recv_timeout - client receive with timeout (read)
  301. *
  302. * @cldev: me client device
  303. * @buf: buffer to receive
  304. * @length: buffer length
  305. * @timeout: send timeout in milliseconds, 0 for infinite timeout
  306. *
  307. * Return:
  308. * * read size in bytes
  309. * * < 0 on error
  310. */
  311. ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
  312. unsigned long timeout)
  313. {
  314. return mei_cldev_recv_vtag_timeout(cldev, buf, length, NULL, timeout);
  315. }
  316. EXPORT_SYMBOL_GPL(mei_cldev_recv_timeout);
  317. /**
  318. * mei_cldev_recv_vtag_timeout - client receive with vtag (read)
  319. *
  320. * @cldev: me client device
  321. * @buf: buffer to receive
  322. * @length: buffer length
  323. * @vtag: virtual tag
  324. * @timeout: recv timeout in milliseconds, 0 for infinite timeout
  325. *
  326. * Return:
  327. * * read size in bytes
  328. * * < 0 on error
  329. */
  330. ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
  331. u8 *vtag, unsigned long timeout)
  332. {
  333. struct mei_cl *cl = cldev->cl;
  334. return __mei_cl_recv(cl, buf, length, vtag, 0, timeout);
  335. }
  336. EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag_timeout);
  337. /**
  338. * mei_cldev_send - me device send (write)
  339. *
  340. * @cldev: me client device
  341. * @buf: buffer to send
  342. * @length: buffer length
  343. *
  344. * Return:
  345. * * written size in bytes
  346. * * < 0 on error
  347. */
  348. ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
  349. {
  350. return mei_cldev_send_vtag(cldev, buf, length, 0);
  351. }
  352. EXPORT_SYMBOL_GPL(mei_cldev_send);
  353. /**
  354. * mei_cldev_send_timeout - me device send with timeout (write)
  355. *
  356. * @cldev: me client device
  357. * @buf: buffer to send
  358. * @length: buffer length
  359. * @timeout: send timeout in milliseconds, 0 for infinite timeout
  360. *
  361. * Return:
  362. * * written size in bytes
  363. * * < 0 on error
  364. */
  365. ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf, size_t length,
  366. unsigned long timeout)
  367. {
  368. return mei_cldev_send_vtag_timeout(cldev, buf, length, 0, timeout);
  369. }
  370. EXPORT_SYMBOL_GPL(mei_cldev_send_timeout);
  371. /**
  372. * mei_cldev_recv - client receive (read)
  373. *
  374. * @cldev: me client device
  375. * @buf: buffer to receive
  376. * @length: buffer length
  377. *
  378. * Return: read size in bytes of < 0 on error
  379. */
  380. ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
  381. {
  382. return mei_cldev_recv_vtag(cldev, buf, length, NULL);
  383. }
  384. EXPORT_SYMBOL_GPL(mei_cldev_recv);
  385. /**
  386. * mei_cldev_recv_nonblock - non block client receive (read)
  387. *
  388. * @cldev: me client device
  389. * @buf: buffer to receive
  390. * @length: buffer length
  391. *
  392. * Return: read size in bytes of < 0 on error
  393. * -EAGAIN if function will block.
  394. */
  395. ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
  396. size_t length)
  397. {
  398. return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
  399. }
  400. EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
/**
 * mei_cl_bus_rx_work - dispatch rx event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_rx_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;
	struct mei_device *bus;

	cldev = container_of(work, struct mei_cl_device, rx_work);
	bus = cldev->bus;

	/* deliver the event to the registered consumer, if any */
	if (cldev->rx_cb)
		cldev->rx_cb(cldev);

	/* re-arm the read so the next rx event can be delivered */
	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	mutex_unlock(&bus->device_lock);
}
  419. /**
  420. * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
  421. *
  422. * @work: work
  423. */
  424. static void mei_cl_bus_notif_work(struct work_struct *work)
  425. {
  426. struct mei_cl_device *cldev;
  427. cldev = container_of(work, struct mei_cl_device, notif_work);
  428. if (cldev->notif_cb)
  429. cldev->notif_cb(cldev);
  430. }
  431. /**
  432. * mei_cl_bus_notify_event - schedule notify cb on bus client
  433. *
  434. * @cl: host client
  435. *
  436. * Return: true if event was scheduled
  437. * false if the client is not waiting for event
  438. */
  439. bool mei_cl_bus_notify_event(struct mei_cl *cl)
  440. {
  441. struct mei_cl_device *cldev = cl->cldev;
  442. if (!cldev || !cldev->notif_cb)
  443. return false;
  444. if (!cl->notify_ev)
  445. return false;
  446. schedule_work(&cldev->notif_work);
  447. cl->notify_ev = false;
  448. return true;
  449. }
  450. /**
  451. * mei_cl_bus_rx_event - schedule rx event
  452. *
  453. * @cl: host client
  454. *
  455. * Return: true if event was scheduled
  456. * false if the client is not waiting for event
  457. */
  458. bool mei_cl_bus_rx_event(struct mei_cl *cl)
  459. {
  460. struct mei_cl_device *cldev = cl->cldev;
  461. if (!cldev || !cldev->rx_cb)
  462. return false;
  463. schedule_work(&cldev->rx_work);
  464. return true;
  465. }
/**
 * mei_cldev_register_rx_cb - register Rx event callback
 *
 * @cldev: me client devices
 * @rx_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!rx_cb)
		return -EINVAL;
	/* only a single rx callback per device */
	if (cldev->rx_cb)
		return -EALREADY;

	cldev->rx_cb = rx_cb;
	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);

	/* arm the first read so the callback can fire */
	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	else
		ret = -ENODEV;
	mutex_unlock(&bus->device_lock);
	/* -EBUSY means a read is already pending: treated as success */
	if (ret && ret != -EBUSY) {
		/* roll back registration on failure */
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
/**
 * mei_cldev_register_notif_cb - register FW notification event callback
 *
 * @cldev: me client devices
 * @notif_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
				mei_cldev_cb_t notif_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!notif_cb)
		return -EINVAL;
	/* only a single notification callback per device */
	if (cldev->notif_cb)
		return -EALREADY;

	cldev->notif_cb = notif_cb;
	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);

	/* ask the FW to start sending notifications for this client */
	mutex_lock(&bus->device_lock);
	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
	mutex_unlock(&bus->device_lock);
	if (ret) {
		/* roll back registration on failure */
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
  532. /**
  533. * mei_cldev_get_drvdata - driver data getter
  534. *
  535. * @cldev: mei client device
  536. *
  537. * Return: driver private data
  538. */
  539. void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
  540. {
  541. return dev_get_drvdata(&cldev->dev);
  542. }
  543. EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
  544. /**
  545. * mei_cldev_set_drvdata - driver data setter
  546. *
  547. * @cldev: mei client device
  548. * @data: data to store
  549. */
  550. void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
  551. {
  552. dev_set_drvdata(&cldev->dev, data);
  553. }
  554. EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
  555. /**
  556. * mei_cldev_uuid - return uuid of the underlying me client
  557. *
  558. * @cldev: mei client device
  559. *
  560. * Return: me client uuid
  561. */
  562. const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
  563. {
  564. return mei_me_cl_uuid(cldev->me_cl);
  565. }
  566. EXPORT_SYMBOL_GPL(mei_cldev_uuid);
  567. /**
  568. * mei_cldev_ver - return protocol version of the underlying me client
  569. *
  570. * @cldev: mei client device
  571. *
  572. * Return: me client protocol version
  573. */
  574. u8 mei_cldev_ver(const struct mei_cl_device *cldev)
  575. {
  576. return mei_me_cl_ver(cldev->me_cl);
  577. }
  578. EXPORT_SYMBOL_GPL(mei_cldev_ver);
  579. /**
  580. * mei_cldev_enabled - check whether the device is enabled
  581. *
  582. * @cldev: mei client device
  583. *
  584. * Return: true if me client is initialized and connected
  585. */
  586. bool mei_cldev_enabled(const struct mei_cl_device *cldev)
  587. {
  588. return mei_cl_is_connected(cldev->cl);
  589. }
  590. EXPORT_SYMBOL_GPL(mei_cldev_enabled);
  591. /**
  592. * mei_cl_bus_module_get - acquire module of the underlying
  593. * hw driver.
  594. *
  595. * @cldev: mei client device
  596. *
  597. * Return: true on success; false if the module was removed.
  598. */
  599. static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
  600. {
  601. return try_module_get(cldev->bus->dev->driver->owner);
  602. }
  603. /**
  604. * mei_cl_bus_module_put - release the underlying hw module.
  605. *
  606. * @cldev: mei client device
  607. */
  608. static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
  609. {
  610. module_put(cldev->bus->dev->driver->owner);
  611. }
  612. /**
  613. * mei_cl_bus_vtag - get bus vtag entry wrapper
  614. * The tag for bus client is always first.
  615. *
  616. * @cl: host client
  617. *
  618. * Return: bus vtag or NULL
  619. */
  620. static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
  621. {
  622. return list_first_entry_or_null(&cl->vtag_map,
  623. struct mei_cl_vtag, list);
  624. }
/**
 * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
 *
 * @cldev: me client device
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if memory allocation failed
 */
static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	/*
	 * Bail out if the client does not supports vtags
	 * or has already allocated one
	 */
	if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
		return 0;

	/* bus entry uses vtag 0 and no associated file pointer */
	cl_vtag = mei_cl_vtag_alloc(NULL, 0);
	/* any allocation failure is reported as -ENOMEM per the contract above */
	if (IS_ERR(cl_vtag))
		return -ENOMEM;

	list_add_tail(&cl_vtag->list, &cl->vtag_map);

	return 0;
}
  650. /**
  651. * mei_cl_bus_vtag_free - remove the bus entry from vtag map
  652. *
  653. * @cldev: me client device
  654. */
  655. static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
  656. {
  657. struct mei_cl *cl = cldev->cl;
  658. struct mei_cl_vtag *cl_vtag;
  659. cl_vtag = mei_cl_bus_vtag(cl);
  660. if (!cl_vtag)
  661. return;
  662. list_del(&cl_vtag->list);
  663. kfree(cl_vtag);
  664. }
/**
 * mei_cldev_dma_map - allocate and map a DMA buffer for the client
 *
 * @cldev: me client device
 * @buffer_id: buffer id; must be non-zero
 * @size: buffer size; must be a multiple of MEI_FW_PAGE_SIZE
 *
 * Links the host client if it is still uninitialized, then allocates
 * and maps the DMA buffer.
 *
 * Return: virtual address of the mapped buffer or ERR_PTR on failure
 */
void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev || !buffer_id || !size)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
			MEI_FW_PAGE_SIZE);
		return ERR_PTR(-EINVAL);
	}

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
	if (ret)
		/* undo the link on mapping failure */
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);
	if (ret)
		return ERR_PTR(ret);
	return cl->dma.vaddr;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
/**
 * mei_cldev_dma_unmap - unmap and free the client DMA buffer
 *
 * @cldev: me client device
 *
 * Unmaps the DMA buffer, flushes the client queues and unlinks the
 * host client.
 *
 * Return: 0 on success or a negative error code from the unmap
 */
int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	ret = mei_cl_dma_unmap(cl, NULL);

	/* drop any queued work and detach the client */
	mei_cl_flush_queues(cl, NULL);
	mei_cl_unlink(cl);
	mutex_unlock(&bus->device_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
/**
 * mei_cldev_enable - enable me client device
 *     create connection with me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_enable(struct mei_cl_device *cldev)
{
	struct mei_device *bus = cldev->bus;
	struct mei_cl *cl;
	int ret;

	cl = cldev->cl;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	/* already connected: nothing to do */
	if (mei_cl_is_connected(cl)) {
		ret = 0;
		goto out;
	}

	if (!mei_me_cl_is_active(cldev->me_cl)) {
		dev_err(&cldev->dev, "me client is not active\n");
		ret = -ENOTTY;
		goto out;
	}

	/* add the bus vtag entry before connecting (no-op w/o vtag support) */
	ret = mei_cl_bus_vtag_alloc(cldev);
	if (ret)
		goto out;

	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
	if (ret < 0) {
		dev_err(&cldev->dev, "cannot connect\n");
		mei_cl_bus_vtag_free(cldev);
	}

out:
	/* undo the link on any failure past the link step */
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_enable);
  761. /**
  762. * mei_cldev_unregister_callbacks - internal wrapper for unregistering
  763. * callbacks.
  764. *
  765. * @cldev: client device
  766. */
  767. static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
  768. {
  769. if (cldev->rx_cb) {
  770. cancel_work_sync(&cldev->rx_work);
  771. cldev->rx_cb = NULL;
  772. }
  773. if (cldev->notif_cb) {
  774. cancel_work_sync(&cldev->notif_work);
  775. cldev->notif_cb = NULL;
  776. }
  777. }
/**
 * mei_cldev_disable - disable me client device
 *     disconnect form the me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_disable(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int err;

	if (!cldev)
		return -ENODEV;

	cl = cldev->cl;
	bus = cldev->bus;

	/* stop event delivery before tearing the connection down */
	mei_cldev_unregister_callbacks(cldev);

	mutex_lock(&bus->device_lock);

	mei_cl_bus_vtag_free(cldev);

	if (!mei_cl_is_connected(cl)) {
		dev_dbg(bus->dev, "Already disconnected\n");
		err = 0;
		goto out;
	}

	err = mei_cl_disconnect(cl);
	if (err < 0)
		dev_err(bus->dev, "Could not disconnect from the ME client\n");

out:
	/* Flush queues and remove any pending read unless we have mapped DMA */
	if (!cl->dma_mapped) {
		mei_cl_flush_queues(cl, NULL);
		mei_cl_unlink(cl);
	}

	mutex_unlock(&bus->device_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mei_cldev_disable);
  816. /**
  817. * mei_cldev_send_gsc_command - sends a gsc command, by sending
  818. * a gsl mei message to gsc and receiving reply from gsc
  819. *
  820. * @cldev: me client device
  821. * @client_id: client id to send the command to
  822. * @fence_id: fence id to send the command to
  823. * @sg_in: scatter gather list containing addresses for rx message buffer
  824. * @total_in_len: total length of data in 'in' sg, can be less than the sum of buffers sizes
  825. * @sg_out: scatter gather list containing addresses for tx message buffer
  826. *
  827. * Return:
  828. * * written size in bytes
  829. * * < 0 on error
  830. */
  831. ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
  832. u8 client_id, u32 fence_id,
  833. struct scatterlist *sg_in,
  834. size_t total_in_len,
  835. struct scatterlist *sg_out)
  836. {
  837. struct mei_cl *cl;
  838. struct mei_device *bus;
  839. ssize_t ret = 0;
  840. struct mei_ext_hdr_gsc_h2f *ext_hdr;
  841. size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f);
  842. int sg_out_nents, sg_in_nents;
  843. int i;
  844. struct scatterlist *sg;
  845. struct mei_ext_hdr_gsc_f2h rx_msg;
  846. unsigned int sg_len;
  847. if (!cldev || !sg_in || !sg_out)
  848. return -EINVAL;
  849. cl = cldev->cl;
  850. bus = cldev->bus;
  851. dev_dbg(bus->dev, "client_id %u, fence_id %u\n", client_id, fence_id);
  852. if (!bus->hbm_f_gsc_supported)
  853. return -EOPNOTSUPP;
  854. sg_out_nents = sg_nents(sg_out);
  855. sg_in_nents = sg_nents(sg_in);
  856. /* at least one entry in tx and rx sgls must be present */
  857. if (sg_out_nents <= 0 || sg_in_nents <= 0)
  858. return -EINVAL;
  859. buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl);
  860. ext_hdr = kzalloc(buf_sz, GFP_KERNEL);
  861. if (!ext_hdr)
  862. return -ENOMEM;
  863. /* construct the GSC message */
  864. ext_hdr->hdr.type = MEI_EXT_HDR_GSC;
  865. ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */
  866. ext_hdr->client_id = client_id;
  867. ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL;
  868. ext_hdr->fence_id = fence_id;
  869. ext_hdr->input_address_count = sg_in_nents;
  870. ext_hdr->output_address_count = sg_out_nents;
  871. ext_hdr->reserved[0] = 0;
  872. ext_hdr->reserved[1] = 0;
  873. /* copy in-sgl to the message */
  874. for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) {
  875. ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
  876. ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
  877. sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
  878. ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len;
  879. total_in_len -= ext_hdr->sgl[i].length;
  880. }
  881. /* copy out-sgl to the message */
  882. for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) {
  883. ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
  884. ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
  885. sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
  886. ext_hdr->sgl[i].length = sg_len;
  887. }
  888. /* send the message to GSC */
  889. ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL);
  890. if (ret < 0) {
  891. dev_err(bus->dev, "__mei_cl_send failed, returned %zd\n", ret);
  892. goto end;
  893. }
  894. if (ret != buf_sz) {
  895. dev_err(bus->dev, "__mei_cl_send returned %zd instead of expected %zd\n",
  896. ret, buf_sz);
  897. ret = -EIO;
  898. goto end;
  899. }
  900. /* receive the reply from GSC, note that at this point sg_in should contain the reply */
  901. ret = __mei_cl_recv(cl, (u8 *)&rx_msg, sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0);
  902. if (ret != sizeof(rx_msg)) {
  903. dev_err(bus->dev, "__mei_cl_recv returned %zd instead of expected %zd\n",
  904. ret, sizeof(rx_msg));
  905. if (ret >= 0)
  906. ret = -EIO;
  907. goto end;
  908. }
  909. /* check rx_msg.client_id and rx_msg.fence_id match the ones we send */
  910. if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) {
  911. dev_err(bus->dev, "received client_id/fence_id %u/%u instead of %u/%u sent\n",
  912. rx_msg.client_id, rx_msg.fence_id, client_id, fence_id);
  913. ret = -EFAULT;
  914. goto end;
  915. }
  916. dev_dbg(bus->dev, "gsc command: successfully written %u bytes\n", rx_msg.written);
  917. ret = rx_msg.written;
  918. end:
  919. kfree(ext_hdr);
  920. return ret;
  921. }
  922. EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command);
  923. /**
  924. * mei_cl_device_find - find matching entry in the driver id table
  925. *
  926. * @cldev: me client device
  927. * @cldrv: me client driver
  928. *
  929. * Return: id on success; NULL if no id is matching
  930. */
  931. static const
  932. struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
  933. const struct mei_cl_driver *cldrv)
  934. {
  935. const struct mei_cl_device_id *id;
  936. const uuid_le *uuid;
  937. u8 version;
  938. bool match;
  939. uuid = mei_me_cl_uuid(cldev->me_cl);
  940. version = mei_me_cl_ver(cldev->me_cl);
  941. id = cldrv->id_table;
  942. while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
  943. if (!uuid_le_cmp(*uuid, id->uuid)) {
  944. match = true;
  945. if (cldev->name[0])
  946. if (strncmp(cldev->name, id->name,
  947. sizeof(id->name)))
  948. match = false;
  949. if (id->version != MEI_CL_VERSION_ANY)
  950. if (id->version != version)
  951. match = false;
  952. if (match)
  953. return id;
  954. }
  955. id++;
  956. }
  957. return NULL;
  958. }
  959. /**
  960. * mei_cl_device_match - device match function
  961. *
  962. * @dev: device
  963. * @drv: driver
  964. *
  965. * Return: 1 if matching device was found 0 otherwise
  966. */
  967. static int mei_cl_device_match(struct device *dev, const struct device_driver *drv)
  968. {
  969. const struct mei_cl_device *cldev = to_mei_cl_device(dev);
  970. const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
  971. const struct mei_cl_device_id *found_id;
  972. if (!cldev->do_match)
  973. return 0;
  974. if (!cldrv || !cldrv->id_table)
  975. return 0;
  976. found_id = mei_cl_device_find(cldev, cldrv);
  977. if (found_id)
  978. return 1;
  979. return 0;
  980. }
  981. /**
  982. * mei_cl_device_probe - bus probe function
  983. *
  984. * @dev: device
  985. *
  986. * Return: 0 on success; < 0 otherwise
  987. */
  988. static int mei_cl_device_probe(struct device *dev)
  989. {
  990. struct mei_cl_device *cldev;
  991. struct mei_cl_driver *cldrv;
  992. const struct mei_cl_device_id *id;
  993. int ret;
  994. cldev = to_mei_cl_device(dev);
  995. cldrv = to_mei_cl_driver(dev->driver);
  996. if (!cldrv || !cldrv->probe)
  997. return -ENODEV;
  998. id = mei_cl_device_find(cldev, cldrv);
  999. if (!id)
  1000. return -ENODEV;
  1001. if (!mei_cl_bus_module_get(cldev)) {
  1002. dev_err(&cldev->dev, "get hw module failed");
  1003. return -ENODEV;
  1004. }
  1005. ret = cldrv->probe(cldev, id);
  1006. if (ret) {
  1007. mei_cl_bus_module_put(cldev);
  1008. return ret;
  1009. }
  1010. __module_get(THIS_MODULE);
  1011. return 0;
  1012. }
  1013. /**
  1014. * mei_cl_device_remove - remove device from the bus
  1015. *
  1016. * @dev: device
  1017. *
  1018. * Return: 0 on success; < 0 otherwise
  1019. */
  1020. static void mei_cl_device_remove(struct device *dev)
  1021. {
  1022. struct mei_cl_device *cldev = to_mei_cl_device(dev);
  1023. struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
  1024. if (cldrv->remove)
  1025. cldrv->remove(cldev);
  1026. mei_cldev_unregister_callbacks(cldev);
  1027. mei_cl_bus_module_put(cldev);
  1028. module_put(THIS_MODULE);
  1029. }
  1030. static ssize_t name_show(struct device *dev, struct device_attribute *a,
  1031. char *buf)
  1032. {
  1033. struct mei_cl_device *cldev = to_mei_cl_device(dev);
  1034. return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
  1035. }
  1036. static DEVICE_ATTR_RO(name);
  1037. static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
  1038. char *buf)
  1039. {
  1040. struct mei_cl_device *cldev = to_mei_cl_device(dev);
  1041. const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
  1042. return sprintf(buf, "%pUl", uuid);
  1043. }
  1044. static DEVICE_ATTR_RO(uuid);
  1045. static ssize_t version_show(struct device *dev, struct device_attribute *a,
  1046. char *buf)
  1047. {
  1048. struct mei_cl_device *cldev = to_mei_cl_device(dev);
  1049. u8 version = mei_me_cl_ver(cldev->me_cl);
  1050. return sprintf(buf, "%02X", version);
  1051. }
  1052. static DEVICE_ATTR_RO(version);
  1053. static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
  1054. char *buf)
  1055. {
  1056. struct mei_cl_device *cldev = to_mei_cl_device(dev);
  1057. const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
  1058. u8 version = mei_me_cl_ver(cldev->me_cl);
  1059. return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
  1060. cldev->name, uuid, version);
  1061. }
  1062. static DEVICE_ATTR_RO(modalias);
  1063. static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
  1064. char *buf)
  1065. {
  1066. struct mei_cl_device *cldev = to_mei_cl_device(dev);
  1067. u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
  1068. return sprintf(buf, "%d", maxconn);
  1069. }
  1070. static DEVICE_ATTR_RO(max_conn);
  1071. static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
  1072. char *buf)
  1073. {
  1074. struct mei_cl_device *cldev = to_mei_cl_device(dev);
  1075. u8 fixed = mei_me_cl_fixed(cldev->me_cl);
  1076. return sprintf(buf, "%d", fixed);
  1077. }
  1078. static DEVICE_ATTR_RO(fixed);
  1079. static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
  1080. char *buf)
  1081. {
  1082. struct mei_cl_device *cldev = to_mei_cl_device(dev);
  1083. bool vt = mei_me_cl_vt(cldev->me_cl);
  1084. return sprintf(buf, "%d", vt);
  1085. }
  1086. static DEVICE_ATTR_RO(vtag);
  1087. static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
  1088. char *buf)
  1089. {
  1090. struct mei_cl_device *cldev = to_mei_cl_device(dev);
  1091. u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
  1092. return sprintf(buf, "%u", maxlen);
  1093. }
  1094. static DEVICE_ATTR_RO(max_len);
/* sysfs attributes published for every mei client device */
static struct attribute *mei_cldev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_uuid.attr,
	&dev_attr_version.attr,
	&dev_attr_modalias.attr,
	&dev_attr_max_conn.attr,
	&dev_attr_fixed.attr,
	&dev_attr_vtag.attr,
	&dev_attr_max_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);
  1107. /**
  1108. * mei_cl_device_uevent - me client bus uevent handler
  1109. *
  1110. * @dev: device
  1111. * @env: uevent kobject
  1112. *
  1113. * Return: 0 on success -ENOMEM on when add_uevent_var fails
  1114. */
  1115. static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
  1116. {
  1117. const struct mei_cl_device *cldev = to_mei_cl_device(dev);
  1118. const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
  1119. u8 version = mei_me_cl_ver(cldev->me_cl);
  1120. if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
  1121. return -ENOMEM;
  1122. if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
  1123. return -ENOMEM;
  1124. if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
  1125. return -ENOMEM;
  1126. if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
  1127. cldev->name, uuid, version))
  1128. return -ENOMEM;
  1129. return 0;
  1130. }
/* mei client bus type: device matching, probe/remove and uevent handling */
static const struct bus_type mei_cl_bus_type = {
	.name = "mei",
	.dev_groups = mei_cldev_groups,
	.match = mei_cl_device_match,
	.probe = mei_cl_device_probe,
	.remove = mei_cl_device_remove,
	.uevent = mei_cl_device_uevent,
};
  1139. static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
  1140. {
  1141. if (bus)
  1142. get_device(bus->dev);
  1143. return bus;
  1144. }
  1145. static void mei_dev_bus_put(struct mei_device *bus)
  1146. {
  1147. if (bus)
  1148. put_device(bus->dev);
  1149. }
/* device core release callback: tear down the client device and free it */
static void mei_cl_bus_dev_release(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	/* drain any I/O still queued on the host client before freeing */
	mei_cl_flush_queues(cldev->cl, NULL);
	/* drop the references taken in mei_cl_bus_dev_alloc() */
	mei_me_cl_put(cldev->me_cl);
	mei_dev_bus_put(cldev->bus);
	kfree(cldev->cl);
	kfree(cldev);
}
/* device type for mei client devices; hooks up the release callback */
static const struct device_type mei_cl_device_type = {
	.release = mei_cl_bus_dev_release,
};
  1162. /**
  1163. * mei_cl_bus_set_name - set device name for me client device
  1164. * <controller>-<client device>
  1165. * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
  1166. *
  1167. * @cldev: me client device
  1168. */
  1169. static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
  1170. {
  1171. dev_set_name(&cldev->dev, "%s-%pUl",
  1172. dev_name(cldev->bus->dev),
  1173. mei_me_cl_uuid(cldev->me_cl));
  1174. }
/**
 * mei_cl_bus_dev_alloc - initialize and allocate mei client device
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Return: allocated device structure or NULL on allocation failure
 */
static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
						  struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;
	struct mei_cl *cl;

	cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
	if (!cldev)
		return NULL;

	cl = mei_cl_allocate(bus);
	if (!cl) {
		kfree(cldev);
		return NULL;
	}

	device_initialize(&cldev->dev);
	cldev->dev.parent = bus->dev;
	cldev->dev.bus = &mei_cl_bus_type;
	cldev->dev.type = &mei_cl_device_type;
	/* both references below are dropped in mei_cl_bus_dev_release() */
	cldev->bus = mei_dev_bus_get(bus);
	cldev->me_cl = mei_me_cl_get(me_cl);
	cldev->cl = cl;
	mei_cl_bus_set_name(cldev);
	/* not yet registered with the device core; see mei_cl_bus_dev_add() */
	cldev->is_added = 0;
	INIT_LIST_HEAD(&cldev->bus_list);
	device_enable_async_suspend(&cldev->dev);

	return cldev;
}
  1209. /**
  1210. * mei_cl_bus_dev_setup - setup me client device
  1211. * run fix up routines and set the device name
  1212. *
  1213. * @bus: mei device
  1214. * @cldev: me client device
  1215. *
  1216. * Return: true if the device is eligible for enumeration
  1217. */
  1218. static bool mei_cl_bus_dev_setup(struct mei_device *bus,
  1219. struct mei_cl_device *cldev)
  1220. {
  1221. cldev->do_match = 1;
  1222. mei_cl_bus_dev_fixup(cldev);
  1223. /* the device name can change during fix up */
  1224. if (cldev->do_match)
  1225. mei_cl_bus_set_name(cldev);
  1226. return cldev->do_match == 1;
  1227. }
  1228. /**
  1229. * mei_cl_bus_dev_add - add me client devices
  1230. *
  1231. * @cldev: me client device
  1232. *
  1233. * Return: 0 on success; < 0 on failure
  1234. */
  1235. static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
  1236. {
  1237. int ret;
  1238. dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
  1239. mei_me_cl_uuid(cldev->me_cl),
  1240. mei_me_cl_ver(cldev->me_cl));
  1241. ret = device_add(&cldev->dev);
  1242. if (!ret)
  1243. cldev->is_added = 1;
  1244. return ret;
  1245. }
  1246. /**
  1247. * mei_cl_bus_dev_stop - stop the driver
  1248. *
  1249. * @cldev: me client device
  1250. */
  1251. static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
  1252. {
  1253. cldev->do_match = 0;
  1254. if (cldev->is_added)
  1255. device_release_driver(&cldev->dev);
  1256. }
/**
 * mei_cl_bus_dev_destroy - destroy me client devices object
 *
 * @cldev: me client device
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
{
	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));

	/* nothing to undo if the device was never added to the device core */
	if (!cldev->is_added)
		return;

	device_del(&cldev->dev);

	list_del_init(&cldev->bus_list);

	cldev->is_added = 0;
	/* drop our reference; the release callback may free cldev here */
	put_device(&cldev->dev);
}
/**
 * mei_cl_bus_remove_device - remove a device from the bus
 *
 * @cldev: me client device
 */
static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
{
	/* unbind the driver first, then unregister and drop the device */
	mei_cl_bus_dev_stop(cldev);
	mei_cl_bus_dev_destroy(cldev);
}
  1284. /**
  1285. * mei_cl_bus_remove_devices - remove all devices form the bus
  1286. *
  1287. * @bus: mei device
  1288. */
  1289. void mei_cl_bus_remove_devices(struct mei_device *bus)
  1290. {
  1291. struct mei_cl_device *cldev, *next;
  1292. mutex_lock(&bus->cl_bus_lock);
  1293. list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
  1294. mei_cl_bus_remove_device(cldev);
  1295. mutex_unlock(&bus->cl_bus_lock);
  1296. }
/**
 * mei_cl_bus_dev_init - allocate and initializes an mei client devices
 * based on me client
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_init(struct mei_device *bus,
				struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;

	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));

	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));

	/* only one bus device per me client */
	if (me_cl->bus_added)
		return;

	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
	if (!cldev)
		return;

	me_cl->bus_added = true;
	list_add_tail(&cldev->bus_list, &bus->device_list);
}
  1320. /**
  1321. * mei_cl_bus_rescan - scan me clients list and add create
  1322. * devices for eligible clients
  1323. *
  1324. * @bus: mei device
  1325. */
  1326. static void mei_cl_bus_rescan(struct mei_device *bus)
  1327. {
  1328. struct mei_cl_device *cldev, *n;
  1329. struct mei_me_client *me_cl;
  1330. mutex_lock(&bus->cl_bus_lock);
  1331. down_read(&bus->me_clients_rwsem);
  1332. list_for_each_entry(me_cl, &bus->me_clients, list)
  1333. mei_cl_bus_dev_init(bus, me_cl);
  1334. up_read(&bus->me_clients_rwsem);
  1335. list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
  1336. if (!mei_me_cl_is_active(cldev->me_cl)) {
  1337. mei_cl_bus_remove_device(cldev);
  1338. continue;
  1339. }
  1340. if (cldev->is_added)
  1341. continue;
  1342. if (mei_cl_bus_dev_setup(bus, cldev))
  1343. mei_cl_bus_dev_add(cldev);
  1344. else {
  1345. list_del_init(&cldev->bus_list);
  1346. put_device(&cldev->dev);
  1347. }
  1348. }
  1349. mutex_unlock(&bus->cl_bus_lock);
  1350. dev_dbg(bus->dev, "rescan end");
  1351. }
  1352. void mei_cl_bus_rescan_work(struct work_struct *work)
  1353. {
  1354. struct mei_device *bus =
  1355. container_of(work, struct mei_device, bus_rescan_work);
  1356. mei_cl_bus_rescan(bus);
  1357. }
  1358. int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
  1359. struct module *owner)
  1360. {
  1361. int err;
  1362. cldrv->driver.name = cldrv->name;
  1363. cldrv->driver.owner = owner;
  1364. cldrv->driver.bus = &mei_cl_bus_type;
  1365. err = driver_register(&cldrv->driver);
  1366. if (err)
  1367. return err;
  1368. pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
  1369. return 0;
  1370. }
  1371. EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
  1372. void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
  1373. {
  1374. driver_unregister(&cldrv->driver);
  1375. pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
  1376. }
  1377. EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
  1378. int __init mei_cl_bus_init(void)
  1379. {
  1380. return bus_register(&mei_cl_bus_type);
  1381. }
  1382. void __exit mei_cl_bus_exit(void)
  1383. {
  1384. bus_unregister(&mei_cl_bus_type);
  1385. }