cec-api.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * cec-api.c - HDMI Consumer Electronics Control framework - API
  4. *
  5. * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
  6. */
  7. #include <linux/errno.h>
  8. #include <linux/init.h>
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/kmod.h>
  12. #include <linux/ktime.h>
  13. #include <linux/slab.h>
  14. #include <linux/mm.h>
  15. #include <linux/string.h>
  16. #include <linux/types.h>
  17. #include <linux/uaccess.h>
  18. #include <linux/version.h>
  19. #include <media/cec-pin.h>
  20. #include "cec-priv.h"
  21. #include "cec-pin-priv.h"
  22. static inline struct cec_devnode *cec_devnode_data(struct file *filp)
  23. {
  24. struct cec_fh *fh = filp->private_data;
  25. return &fh->adap->devnode;
  26. }
  27. /* CEC file operations */
  28. static __poll_t cec_poll(struct file *filp,
  29. struct poll_table_struct *poll)
  30. {
  31. struct cec_fh *fh = filp->private_data;
  32. struct cec_adapter *adap = fh->adap;
  33. __poll_t res = 0;
  34. if (!cec_is_registered(adap))
  35. return EPOLLERR | EPOLLHUP;
  36. mutex_lock(&adap->lock);
  37. if (adap->is_configured &&
  38. adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
  39. res |= EPOLLOUT | EPOLLWRNORM;
  40. if (fh->queued_msgs)
  41. res |= EPOLLIN | EPOLLRDNORM;
  42. if (fh->total_queued_events)
  43. res |= EPOLLPRI;
  44. poll_wait(filp, &fh->wait, poll);
  45. mutex_unlock(&adap->lock);
  46. return res;
  47. }
  48. static bool cec_is_busy(const struct cec_adapter *adap,
  49. const struct cec_fh *fh)
  50. {
  51. bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
  52. bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
  53. /*
  54. * Exclusive initiators and followers can always access the CEC adapter
  55. */
  56. if (valid_initiator || valid_follower)
  57. return false;
  58. /*
  59. * All others can only access the CEC adapter if there is no
  60. * exclusive initiator and they are in INITIATOR mode.
  61. */
  62. return adap->cec_initiator ||
  63. fh->mode_initiator == CEC_MODE_NO_INITIATOR;
  64. }
  65. static long cec_adap_g_caps(struct cec_adapter *adap,
  66. struct cec_caps __user *parg)
  67. {
  68. struct cec_caps caps = {};
  69. strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
  70. sizeof(caps.driver));
  71. strlcpy(caps.name, adap->name, sizeof(caps.name));
  72. caps.available_log_addrs = adap->available_log_addrs;
  73. caps.capabilities = adap->capabilities;
  74. caps.version = LINUX_VERSION_CODE;
  75. if (copy_to_user(parg, &caps, sizeof(caps)))
  76. return -EFAULT;
  77. return 0;
  78. }
  79. static long cec_adap_g_phys_addr(struct cec_adapter *adap,
  80. __u16 __user *parg)
  81. {
  82. u16 phys_addr;
  83. mutex_lock(&adap->lock);
  84. phys_addr = adap->phys_addr;
  85. mutex_unlock(&adap->lock);
  86. if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
  87. return -EFAULT;
  88. return 0;
  89. }
  90. static int cec_validate_phys_addr(u16 phys_addr)
  91. {
  92. int i;
  93. if (phys_addr == CEC_PHYS_ADDR_INVALID)
  94. return 0;
  95. for (i = 0; i < 16; i += 4)
  96. if (phys_addr & (0xf << i))
  97. break;
  98. if (i == 16)
  99. return 0;
  100. for (i += 4; i < 16; i += 4)
  101. if ((phys_addr & (0xf << i)) == 0)
  102. return -EINVAL;
  103. return 0;
  104. }
  105. static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
  106. bool block, __u16 __user *parg)
  107. {
  108. u16 phys_addr;
  109. long err;
  110. if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
  111. return -ENOTTY;
  112. if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
  113. return -EFAULT;
  114. err = cec_validate_phys_addr(phys_addr);
  115. if (err)
  116. return err;
  117. mutex_lock(&adap->lock);
  118. if (cec_is_busy(adap, fh))
  119. err = -EBUSY;
  120. else
  121. __cec_s_phys_addr(adap, phys_addr, block);
  122. mutex_unlock(&adap->lock);
  123. return err;
  124. }
  125. static long cec_adap_g_log_addrs(struct cec_adapter *adap,
  126. struct cec_log_addrs __user *parg)
  127. {
  128. struct cec_log_addrs log_addrs;
  129. mutex_lock(&adap->lock);
  130. /*
  131. * We use memcpy here instead of assignment since there is a
  132. * hole at the end of struct cec_log_addrs that an assignment
  133. * might ignore. So when we do copy_to_user() we could leak
  134. * one byte of memory.
  135. */
  136. memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
  137. if (!adap->is_configured)
  138. memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
  139. sizeof(log_addrs.log_addr));
  140. mutex_unlock(&adap->lock);
  141. if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
  142. return -EFAULT;
  143. return 0;
  144. }
  145. static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
  146. bool block, struct cec_log_addrs __user *parg)
  147. {
  148. struct cec_log_addrs log_addrs;
  149. long err = -EBUSY;
  150. if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
  151. return -ENOTTY;
  152. if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
  153. return -EFAULT;
  154. log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
  155. CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
  156. CEC_LOG_ADDRS_FL_CDC_ONLY;
  157. mutex_lock(&adap->lock);
  158. if (!adap->is_configuring &&
  159. (!log_addrs.num_log_addrs || !adap->is_configured) &&
  160. !cec_is_busy(adap, fh)) {
  161. err = __cec_s_log_addrs(adap, &log_addrs, block);
  162. if (!err)
  163. log_addrs = adap->log_addrs;
  164. }
  165. mutex_unlock(&adap->lock);
  166. if (err)
  167. return err;
  168. if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
  169. return -EFAULT;
  170. return 0;
  171. }
  172. static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
  173. bool block, struct cec_msg __user *parg)
  174. {
  175. struct cec_msg msg = {};
  176. long err = 0;
  177. if (!(adap->capabilities & CEC_CAP_TRANSMIT))
  178. return -ENOTTY;
  179. if (copy_from_user(&msg, parg, sizeof(msg)))
  180. return -EFAULT;
  181. /* A CDC-Only device can only send CDC messages */
  182. if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
  183. (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
  184. return -EINVAL;
  185. mutex_lock(&adap->lock);
  186. if (adap->log_addrs.num_log_addrs == 0)
  187. err = -EPERM;
  188. else if (adap->is_configuring)
  189. err = -ENONET;
  190. else if (!adap->is_configured &&
  191. (adap->needs_hpd || msg.msg[0] != 0xf0))
  192. err = -ENONET;
  193. else if (cec_is_busy(adap, fh))
  194. err = -EBUSY;
  195. else
  196. err = cec_transmit_msg_fh(adap, &msg, fh, block);
  197. mutex_unlock(&adap->lock);
  198. if (err)
  199. return err;
  200. if (copy_to_user(parg, &msg, sizeof(msg)))
  201. return -EFAULT;
  202. return 0;
  203. }
/* Called by CEC_RECEIVE: wait for a message to arrive */
/*
 * Dequeue the oldest received message for this filehandle, optionally
 * blocking (with an optional timeout taken from msg->timeout, in ms).
 * Returns 0 on success, -EAGAIN in non-blocking mode when the queue is
 * empty, -ETIMEDOUT when the timeout expires, or -ERESTARTSYS when the
 * wait is interrupted by a signal.
 */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
	/* Keep the caller's timeout: *msg is overwritten below. */
	u32 timeout = msg->timeout;
	int res;

	do {
		mutex_lock(&fh->lock);
		/* Are there received messages queued up? */
		if (fh->queued_msgs) {
			/* Yes, return the first one */
			struct cec_msg_entry *entry =
				list_first_entry(&fh->msgs,
						 struct cec_msg_entry, list);
			list_del(&entry->list);
			*msg = entry->msg;
			kfree(entry);
			fh->queued_msgs--;
			mutex_unlock(&fh->lock);
			/* restore original timeout value */
			msg->timeout = timeout;
			return 0;
		}
		/* No, return EAGAIN in non-blocking mode or wait */
		mutex_unlock(&fh->lock);
		/* Return when in non-blocking mode */
		if (!block)
			return -EAGAIN;
		if (msg->timeout) {
			/* The user specified a timeout */
			res = wait_event_interruptible_timeout(fh->wait,
							       fh->queued_msgs,
				msecs_to_jiffies(msg->timeout));
			/* 0 means the timeout expired, > 0 means success */
			if (res == 0)
				res = -ETIMEDOUT;
			else if (res > 0)
				res = 0;
		} else {
			/* Wait indefinitely */
			res = wait_event_interruptible(fh->wait,
						       fh->queued_msgs);
		}
		/* Exit on error, otherwise loop to get the new message */
	} while (!res);
	return res;
}
  249. static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
  250. bool block, struct cec_msg __user *parg)
  251. {
  252. struct cec_msg msg = {};
  253. long err;
  254. if (copy_from_user(&msg, parg, sizeof(msg)))
  255. return -EFAULT;
  256. err = cec_receive_msg(fh, &msg, block);
  257. if (err)
  258. return err;
  259. msg.flags = 0;
  260. if (copy_to_user(parg, &msg, sizeof(msg)))
  261. return -EFAULT;
  262. return 0;
  263. }
/*
 * CEC_DQEVENT: dequeue the oldest pending event for this filehandle.
 * In blocking mode, sleep until at least one event is queued.
 */
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_event __user *parg)
{
	struct cec_event_entry *ev = NULL;
	u64 ts = ~0ULL;
	unsigned int i;
	unsigned int ev_idx;
	long err = 0;

	mutex_lock(&fh->lock);
	while (!fh->total_queued_events && block) {
		/* Drop the lock while sleeping, retake it before rechecking */
		mutex_unlock(&fh->lock);
		/* Returns -ERESTARTSYS if interrupted by a signal */
		err = wait_event_interruptible(fh->wait,
					       fh->total_queued_events);
		if (err)
			return err;
		mutex_lock(&fh->lock);
	}
	/* Find the oldest event */
	for (i = 0; i < CEC_NUM_EVENTS; i++) {
		struct cec_event_entry *entry =
			list_first_entry_or_null(&fh->events[i],
						 struct cec_event_entry, list);
		/* '<=' so equal timestamps prefer the higher event index */
		if (entry && entry->ev.ts <= ts) {
			ev = entry;
			ev_idx = i;
			ts = ev->ev.ts;
		}
	}
	/* Nothing queued: non-blocking mode (or nothing survived the scan) */
	if (!ev) {
		err = -EAGAIN;
		goto unlock;
	}
	list_del(&ev->list);
	if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
		err = -EFAULT;
	/* Core event entries are statically allocated; only free the rest */
	if (ev_idx >= CEC_NUM_CORE_EVENTS)
		kfree(ev);
	fh->queued_events[ev_idx]--;
	fh->total_queued_events--;
unlock:
	mutex_unlock(&fh->lock);
	return err;
}
  307. static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
  308. u32 __user *parg)
  309. {
  310. u32 mode = fh->mode_initiator | fh->mode_follower;
  311. if (copy_to_user(parg, &mode, sizeof(mode)))
  312. return -EFAULT;
  313. return 0;
  314. }
/*
 * CEC_S_MODE: change the initiator and/or follower mode of this fh.
 *
 * Validates the requested mode combination, enforces capability and
 * permission checks, and updates the adapter's exclusive initiator/
 * follower pointers and the monitor-all/monitor-pin reference counts.
 * Returns 0 on success, -EINVAL for invalid combinations, -EPERM when
 * monitoring requires CAP_NET_ADMIN, or -EBUSY when another filehandle
 * already holds the requested exclusive role.
 */
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode;
	u8 mode_initiator;
	u8 mode_follower;
	bool send_pin_event = false;
	long err = 0;

	if (copy_from_user(&mode, parg, sizeof(mode)))
		return -EFAULT;
	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
		dprintk(1, "%s: invalid mode bits set\n", __func__);
		return -EINVAL;
	}
	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
	    mode_follower > CEC_MODE_MONITOR_ALL) {
		dprintk(1, "%s: unknown mode\n", __func__);
		return -EINVAL;
	}
	if (mode_follower == CEC_MODE_MONITOR_ALL &&
	    !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
		dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
		return -EINVAL;
	}
	if (mode_follower == CEC_MODE_MONITOR_PIN &&
	    !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
		dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
		return -EINVAL;
	}
	/* Follower modes should always be able to send CEC messages */
	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
	    mode_follower >= CEC_MODE_FOLLOWER &&
	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		dprintk(1, "%s: cannot transmit\n", __func__);
		return -EINVAL;
	}
	/* Monitor modes require CEC_MODE_NO_INITIATOR */
	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
		dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
			__func__);
		return -EINVAL;
	}
	/* Monitor modes require CAP_NET_ADMIN */
	if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
		return -EPERM;
	mutex_lock(&adap->lock);
	/*
	 * You can't become exclusive follower if someone else already
	 * has that job.
	 */
	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
	    adap->cec_follower && adap->cec_follower != fh)
		err = -EBUSY;
	/*
	 * You can't become exclusive initiator if someone else already
	 * has that job.
	 */
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
	    adap->cec_initiator && adap->cec_initiator != fh)
		err = -EBUSY;
	/* Adjust the monitor-all refcount if that mode is entered/left. */
	if (!err) {
		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;
		if (old_mon_all != new_mon_all) {
			if (new_mon_all)
				err = cec_monitor_all_cnt_inc(adap);
			else
				cec_monitor_all_cnt_dec(adap);
		}
	}
	/* Likewise for the monitor-pin refcount. */
	if (!err) {
		bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN;
		bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN;
		if (old_mon_pin != new_mon_pin) {
			/* Entering pin mode queues an initial-state event. */
			send_pin_event = new_mon_pin;
			if (new_mon_pin)
				err = cec_monitor_pin_cnt_inc(adap);
			else
				cec_monitor_pin_cnt_dec(adap);
		}
	}
	if (err) {
		mutex_unlock(&adap->lock);
		return err;
	}
	/* Keep the plain-follower count in sync with the mode change. */
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt++;
	if (send_pin_event) {
		struct cec_event ev = {
			.flags = CEC_EVENT_FL_INITIAL_STATE,
		};
		ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
						   CEC_EVENT_PIN_CEC_LOW;
		cec_queue_event_fh(fh, &ev, 0);
	}
	/* Update (or clear) the exclusive follower/initiator pointers. */
	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		adap->passthrough =
			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
		adap->cec_follower = fh;
	} else if (adap->cec_follower == fh) {
		adap->passthrough = false;
		adap->cec_follower = NULL;
	}
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
		adap->cec_initiator = fh;
	else if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	fh->mode_initiator = mode_initiator;
	fh->mode_follower = mode_follower;
	mutex_unlock(&adap->lock);
	return 0;
}
  434. static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  435. {
  436. struct cec_fh *fh = filp->private_data;
  437. struct cec_adapter *adap = fh->adap;
  438. bool block = !(filp->f_flags & O_NONBLOCK);
  439. void __user *parg = (void __user *)arg;
  440. if (!cec_is_registered(adap))
  441. return -ENODEV;
  442. switch (cmd) {
  443. case CEC_ADAP_G_CAPS:
  444. return cec_adap_g_caps(adap, parg);
  445. case CEC_ADAP_G_PHYS_ADDR:
  446. return cec_adap_g_phys_addr(adap, parg);
  447. case CEC_ADAP_S_PHYS_ADDR:
  448. return cec_adap_s_phys_addr(adap, fh, block, parg);
  449. case CEC_ADAP_G_LOG_ADDRS:
  450. return cec_adap_g_log_addrs(adap, parg);
  451. case CEC_ADAP_S_LOG_ADDRS:
  452. return cec_adap_s_log_addrs(adap, fh, block, parg);
  453. case CEC_TRANSMIT:
  454. return cec_transmit(adap, fh, block, parg);
  455. case CEC_RECEIVE:
  456. return cec_receive(adap, fh, block, parg);
  457. case CEC_DQEVENT:
  458. return cec_dqevent(adap, fh, block, parg);
  459. case CEC_G_MODE:
  460. return cec_g_mode(adap, fh, parg);
  461. case CEC_S_MODE:
  462. return cec_s_mode(adap, fh, parg);
  463. default:
  464. return -ENOTTY;
  465. }
  466. }
  467. static int cec_open(struct inode *inode, struct file *filp)
  468. {
  469. struct cec_devnode *devnode =
  470. container_of(inode->i_cdev, struct cec_devnode, cdev);
  471. struct cec_adapter *adap = to_cec_adapter(devnode);
  472. struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
  473. /*
  474. * Initial events that are automatically sent when the cec device is
  475. * opened.
  476. */
  477. struct cec_event ev = {
  478. .event = CEC_EVENT_STATE_CHANGE,
  479. .flags = CEC_EVENT_FL_INITIAL_STATE,
  480. };
  481. unsigned int i;
  482. int err;
  483. if (!fh)
  484. return -ENOMEM;
  485. INIT_LIST_HEAD(&fh->msgs);
  486. INIT_LIST_HEAD(&fh->xfer_list);
  487. for (i = 0; i < CEC_NUM_EVENTS; i++)
  488. INIT_LIST_HEAD(&fh->events[i]);
  489. mutex_init(&fh->lock);
  490. init_waitqueue_head(&fh->wait);
  491. fh->mode_initiator = CEC_MODE_INITIATOR;
  492. fh->adap = adap;
  493. err = cec_get_device(devnode);
  494. if (err) {
  495. kfree(fh);
  496. return err;
  497. }
  498. mutex_lock(&devnode->lock);
  499. if (list_empty(&devnode->fhs) &&
  500. !adap->needs_hpd &&
  501. adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
  502. err = adap->ops->adap_enable(adap, true);
  503. if (err) {
  504. mutex_unlock(&devnode->lock);
  505. kfree(fh);
  506. return err;
  507. }
  508. }
  509. filp->private_data = fh;
  510. /* Queue up initial state events */
  511. ev.state_change.phys_addr = adap->phys_addr;
  512. ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
  513. cec_queue_event_fh(fh, &ev, 0);
  514. #ifdef CONFIG_CEC_PIN
  515. if (adap->pin && adap->pin->ops->read_hpd) {
  516. err = adap->pin->ops->read_hpd(adap);
  517. if (err >= 0) {
  518. ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
  519. CEC_EVENT_PIN_HPD_LOW;
  520. cec_queue_event_fh(fh, &ev, 0);
  521. }
  522. }
  523. if (adap->pin && adap->pin->ops->read_5v) {
  524. err = adap->pin->ops->read_5v(adap);
  525. if (err >= 0) {
  526. ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
  527. CEC_EVENT_PIN_5V_LOW;
  528. cec_queue_event_fh(fh, &ev, 0);
  529. }
  530. }
  531. #endif
  532. list_add(&fh->list, &devnode->fhs);
  533. mutex_unlock(&devnode->lock);
  534. return 0;
  535. }
/* Override for the release function */
/*
 * Close a CEC device node: relinquish any exclusive roles, drop the
 * follower/monitor counts, disable the adapter when this was the last
 * user, detach pending transmits, and free all queued messages/events.
 */
static int cec_release(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = filp->private_data;
	unsigned int i;

	mutex_lock(&adap->lock);
	/* Give up the exclusive initiator/follower roles, if held. */
	if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	if (adap->cec_follower == fh) {
		adap->cec_follower = NULL;
		adap->passthrough = false;
	}
	/* Undo this fh's contribution to the follower/monitor counts. */
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		cec_monitor_pin_cnt_dec(adap);
	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
		cec_monitor_all_cnt_dec(adap);
	mutex_unlock(&adap->lock);
	mutex_lock(&devnode->lock);
	list_del(&fh->list);
	/* Last user gone: disable the adapter (mirrors cec_open()). */
	if (cec_is_registered(adap) && list_empty(&devnode->fhs) &&
	    !adap->needs_hpd && adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		WARN_ON(adap->ops->adap_enable(adap, false));
	}
	mutex_unlock(&devnode->lock);
	/* Unhook pending transmits from this filehandle. */
	mutex_lock(&adap->lock);
	while (!list_empty(&fh->xfer_list)) {
		struct cec_data *data =
			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);
		/* The transmit continues, but no one waits for it anymore. */
		data->blocking = false;
		data->fh = NULL;
		list_del(&data->xfer_list);
	}
	mutex_unlock(&adap->lock);
	/* Free any messages still queued for this filehandle. */
	while (!list_empty(&fh->msgs)) {
		struct cec_msg_entry *entry =
			list_first_entry(&fh->msgs, struct cec_msg_entry, list);
		list_del(&entry->list);
		kfree(entry);
	}
	/* Only non-core event entries are dynamically allocated. */
	for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
		while (!list_empty(&fh->events[i])) {
			struct cec_event_entry *entry =
				list_first_entry(&fh->events[i],
						 struct cec_event_entry, list);
			list_del(&entry->list);
			kfree(entry);
		}
	}
	kfree(fh);
	cec_put_device(devnode);
	filp->private_data = NULL;
	return 0;
}
/* File operations for /dev/cecX device nodes. */
const struct file_operations cec_devnode_fops = {
	.owner = THIS_MODULE,
	.open = cec_open,
	.unlocked_ioctl = cec_ioctl,
	.release = cec_release,
	.poll = cec_poll,
	.llseek = no_llseek,
};
  601. };