// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2024 Linaro Ltd.
 */

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_rmnet.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

#include <linux/remoteproc/qcom_rproc.h>

#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_smp2p.h"
#include "ipa_table.h"
#include "ipa_uc.h"
#define IPA_NETDEV_NAME		"rmnet_ipa%d"	/* network device name template */
#define IPA_NETDEV_TAILROOM	0	/* for padding by mux layer */
#define IPA_NETDEV_TIMEOUT	10	/* seconds */

/**
 * enum ipa_modem_state - state of the modem network device
 * @IPA_MODEM_STATE_STOPPED:	netdev not allocated/registered
 * @IPA_MODEM_STATE_STARTING:	transient; ipa_modem_start() in progress
 * @IPA_MODEM_STATE_RUNNING:	netdev registered and usable
 * @IPA_MODEM_STATE_STOPPING:	transient; ipa_modem_stop() in progress
 *
 * Transitions are claimed atomically (see atomic_cmpxchg() in
 * ipa_modem_start() and ipa_modem_stop()) so start/stop can race safely.
 */
enum ipa_modem_state {
	IPA_MODEM_STATE_STOPPED	= 0,
	IPA_MODEM_STATE_STARTING,
	IPA_MODEM_STATE_RUNNING,
	IPA_MODEM_STATE_STOPPING,
};

/**
 * struct ipa_priv - IPA network device private data
 * @ipa:	IPA pointer
 * @tx:		Transmit endpoint pointer
 * @rx:		Receive endpoint pointer
 * @work:	Work structure used to wake the modem netdev TX queue
 */
struct ipa_priv {
	struct ipa *ipa;
	struct ipa_endpoint *tx;
	struct ipa_endpoint *rx;
	struct work_struct work;
};
  43. /** ipa_open() - Opens the modem network interface */
  44. static int ipa_open(struct net_device *netdev)
  45. {
  46. struct ipa_priv *priv = netdev_priv(netdev);
  47. struct ipa *ipa = priv->ipa;
  48. struct device *dev;
  49. int ret;
  50. dev = ipa->dev;
  51. ret = pm_runtime_get_sync(dev);
  52. if (ret < 0)
  53. goto err_power_put;
  54. ret = ipa_endpoint_enable_one(priv->tx);
  55. if (ret)
  56. goto err_power_put;
  57. ret = ipa_endpoint_enable_one(priv->rx);
  58. if (ret)
  59. goto err_disable_tx;
  60. netif_start_queue(netdev);
  61. pm_runtime_mark_last_busy(dev);
  62. (void)pm_runtime_put_autosuspend(dev);
  63. return 0;
  64. err_disable_tx:
  65. ipa_endpoint_disable_one(priv->tx);
  66. err_power_put:
  67. pm_runtime_put_noidle(dev);
  68. return ret;
  69. }
  70. /** ipa_stop() - Stops the modem network interface. */
  71. static int ipa_stop(struct net_device *netdev)
  72. {
  73. struct ipa_priv *priv = netdev_priv(netdev);
  74. struct ipa *ipa = priv->ipa;
  75. struct device *dev;
  76. int ret;
  77. dev = ipa->dev;
  78. ret = pm_runtime_get_sync(dev);
  79. if (ret < 0)
  80. goto out_power_put;
  81. netif_stop_queue(netdev);
  82. ipa_endpoint_disable_one(priv->rx);
  83. ipa_endpoint_disable_one(priv->tx);
  84. out_power_put:
  85. pm_runtime_mark_last_busy(dev);
  86. (void)pm_runtime_put_autosuspend(dev);
  87. return 0;
  88. }
/** ipa_start_xmit() - Transmit an skb
 * @skb: Socket buffer to be transmitted
 * @netdev: Network device
 *
 * Return: NETDEV_TX_OK if successful (or dropped), NETDEV_TX_BUSY otherwise
 *
 * Normally NETDEV_TX_OK indicates the buffer was successfully transmitted.
 * If the buffer has an unexpected protocol or its size is out of range it
 * is quietly dropped, returning NETDEV_TX_OK.  NETDEV_TX_BUSY indicates
 * the buffer cannot be sent at this time and should be retried later.
 */
static netdev_tx_t
ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct net_device_stats *stats = &netdev->stats;
	struct ipa_priv *priv = netdev_priv(netdev);
	struct ipa_endpoint *endpoint;
	struct ipa *ipa = priv->ipa;
	u32 skb_len = skb->len;	/* record length now; skb may be consumed */
	struct device *dev;
	int ret;

	/* Quietly drop zero-length buffers */
	if (!skb_len)
		goto err_drop_skb;

	/* A QMAP-configured endpoint accepts only MAP-protocol buffers */
	endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
	if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
		goto err_drop_skb;

	/* The hardware must be powered for us to transmit, so if we're not
	 * ready we want the network stack to stop queueing until power is
	 * ACTIVE.  Once runtime resume has completed, we inform the network
	 * stack it's OK to try transmitting again.
	 *
	 * We learn from pm_runtime_get() whether the hardware is powered.
	 * If it was not, powering up is either started or already underway.
	 * And in that case we want to disable queueing, expecting it to be
	 * re-enabled once power is ACTIVE.  But runtime PM and network
	 * transmit run concurrently, and if we're not careful the requests
	 * to stop and start queueing could occur in the wrong order.
	 *
	 * For that reason we *always* stop queueing here, *before* the call
	 * to pm_runtime_get().  If we determine here that power is ACTIVE,
	 * we restart queueing before transmitting the SKB.  Otherwise
	 * queueing will eventually be enabled after resume completes.
	 */
	netif_stop_queue(netdev);

	dev = ipa->dev;
	/* pm_runtime_get() returns 1 if the device is already ACTIVE;
	 * 0 or -EINPROGRESS means a resume request is now underway.
	 */
	ret = pm_runtime_get(dev);
	if (ret < 1) {
		/* If a resume won't happen, just drop the packet */
		if (ret < 0 && ret != -EINPROGRESS) {
			netif_wake_queue(netdev);
			pm_runtime_put_noidle(dev);
			goto err_drop_skb;
		}
		/* Resume is underway; queue stays stopped until it's done */
		pm_runtime_put_noidle(dev);

		return NETDEV_TX_BUSY;
	}

	/* Power is ACTIVE; restart queueing before transmitting */
	netif_wake_queue(netdev);

	ret = ipa_endpoint_skb_tx(endpoint, skb);

	pm_runtime_mark_last_busy(dev);
	(void)pm_runtime_put_autosuspend(dev);

	if (ret) {
		/* An oversized skb (-E2BIG) is dropped; for any other
		 * error ask the stack to retry the (unconsumed) skb.
		 */
		if (ret != -E2BIG)
			return NETDEV_TX_BUSY;
		goto err_drop_skb;
	}

	stats->tx_packets++;
	stats->tx_bytes += skb_len;

	return NETDEV_TX_OK;

err_drop_skb:
	dev_kfree_skb_any(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}
  161. void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb)
  162. {
  163. struct net_device_stats *stats = &netdev->stats;
  164. if (skb) {
  165. skb->dev = netdev;
  166. skb->protocol = htons(ETH_P_MAP);
  167. stats->rx_packets++;
  168. stats->rx_bytes += skb->len;
  169. (void)netif_receive_skb(skb);
  170. } else {
  171. stats->rx_dropped++;
  172. }
  173. }
/* Operations supported by the modem network device */
static const struct net_device_ops ipa_modem_ops = {
	.ndo_open	= ipa_open,
	.ndo_stop	= ipa_stop,
	.ndo_start_xmit	= ipa_start_xmit,
};
  179. /** ipa_modem_netdev_setup() - netdev setup function for the modem */
  180. static void ipa_modem_netdev_setup(struct net_device *netdev)
  181. {
  182. netdev->netdev_ops = &ipa_modem_ops;
  183. netdev->header_ops = NULL;
  184. netdev->type = ARPHRD_RAWIP;
  185. netdev->hard_header_len = 0;
  186. netdev->min_header_len = ETH_HLEN;
  187. netdev->min_mtu = ETH_MIN_MTU;
  188. netdev->max_mtu = IPA_MTU;
  189. netdev->mtu = netdev->max_mtu;
  190. netdev->addr_len = 0;
  191. netdev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
  192. netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
  193. netdev->priv_flags |= IFF_TX_SKB_SHARING;
  194. eth_broadcast_addr(netdev->broadcast);
  195. /* The endpoint is configured for QMAP */
  196. netdev->needed_headroom = sizeof(struct rmnet_map_header);
  197. netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
  198. netdev->watchdog_timeo = IPA_NETDEV_TIMEOUT * HZ;
  199. netdev->hw_features = NETIF_F_SG;
  200. }
  201. /** ipa_modem_suspend() - suspend callback
  202. * @netdev: Network device
  203. *
  204. * Suspend the modem's endpoints.
  205. */
  206. void ipa_modem_suspend(struct net_device *netdev)
  207. {
  208. struct ipa_priv *priv;
  209. if (!(netdev->flags & IFF_UP))
  210. return;
  211. priv = netdev_priv(netdev);
  212. ipa_endpoint_suspend_one(priv->rx);
  213. ipa_endpoint_suspend_one(priv->tx);
  214. }
  215. /**
  216. * ipa_modem_wake_queue_work() - enable modem netdev queue
  217. * @work: Work structure
  218. *
  219. * Re-enable transmit on the modem network device. This is called
  220. * in (power management) work queue context, scheduled when resuming
  221. * the modem. We can't enable the queue directly in ipa_modem_resume()
  222. * because transmits restart the instant the queue is awakened; but the
  223. * device power state won't be ACTIVE until *after* ipa_modem_resume()
  224. * returns.
  225. */
  226. static void ipa_modem_wake_queue_work(struct work_struct *work)
  227. {
  228. struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
  229. netif_wake_queue(priv->tx->netdev);
  230. }
/** ipa_modem_resume() - resume callback for runtime_pm
 * @netdev: Network device
 *
 * Resume the modem's endpoints.  Nothing is done unless the interface
 * is up.  The TX queue is restarted later, from work queue context (see
 * ipa_modem_wake_queue_work()), because power isn't ACTIVE until after
 * this function returns.
 */
void ipa_modem_resume(struct net_device *netdev)
{
	struct ipa_priv *priv;

	if (!(netdev->flags & IFF_UP))
		return;

	priv = netdev_priv(netdev);

	ipa_endpoint_resume_one(priv->tx);
	ipa_endpoint_resume_one(priv->rx);

	/* Arrange for the TX queue to be restarted */
	(void)queue_pm_work(&priv->work);
}
/** ipa_modem_start() - Allocate and register the modem network device
 * @ipa: IPA pointer
 *
 * Return: 0 if successful (including when the modem is already starting
 * or running), or a negative error code.
 */
int ipa_modem_start(struct ipa *ipa)
{
	enum ipa_modem_state state;
	struct net_device *netdev;
	struct ipa_priv *priv;
	int ret;

	/* Only attempt to start the modem if it's stopped; this atomically
	 * claims the STOPPED -> STARTING transition.
	 */
	state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_STOPPED,
			       IPA_MODEM_STATE_STARTING);

	/* Silently ignore attempts when running, or when changing state */
	if (state != IPA_MODEM_STATE_STOPPED)
		return 0;

	netdev = alloc_netdev(sizeof(struct ipa_priv), IPA_NETDEV_NAME,
			      NET_NAME_UNKNOWN, ipa_modem_netdev_setup);
	if (!netdev) {
		ret = -ENOMEM;
		goto out_set_state;
	}

	SET_NETDEV_DEV(netdev, ipa->dev);

	/* Populate the private data with the IPA and endpoint pointers */
	priv = netdev_priv(netdev);
	priv->ipa = ipa;
	priv->tx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
	priv->rx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX];
	INIT_WORK(&priv->work, ipa_modem_wake_queue_work);

	/* Record the netdev pointers before registering the device; they
	 * are cleared again below if registration fails.
	 */
	priv->tx->netdev = netdev;
	priv->rx->netdev = netdev;

	ipa->modem_netdev = netdev;

	ret = register_netdev(netdev);
	if (ret) {
		/* Undo everything done above before freeing the netdev */
		ipa->modem_netdev = NULL;
		priv->rx->netdev = NULL;
		priv->tx->netdev = NULL;

		free_netdev(netdev);
	}

out_set_state:
	/* Leave STARTING for the final state implied by the outcome */
	if (ret)
		atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
	else
		atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
	smp_mb__after_atomic();

	return ret;
}
/** ipa_modem_stop() - Stop and unregister the modem network device
 * @ipa: IPA pointer
 *
 * Return: 0 if successful (or already stopped), or -EBUSY if a start
 * or stop is currently in progress.
 */
int ipa_modem_stop(struct ipa *ipa)
{
	struct net_device *netdev = ipa->modem_netdev;
	enum ipa_modem_state state;

	/* Only attempt to stop the modem if it's running; this atomically
	 * claims the RUNNING -> STOPPING transition.
	 */
	state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
			       IPA_MODEM_STATE_STOPPING);

	/* Silently ignore attempts when already stopped */
	if (state == IPA_MODEM_STATE_STOPPED)
		return 0;

	/* If we're somewhere between stopped and starting, we're busy */
	if (state != IPA_MODEM_STATE_RUNNING)
		return -EBUSY;

	/* Clean up the netdev and endpoints if it was started */
	if (netdev) {
		struct ipa_priv *priv = netdev_priv(netdev);

		/* No queue-wake work must remain pending once we're gone */
		cancel_work_sync(&priv->work);
		/* If it was opened, stop it first */
		if (netdev->flags & IFF_UP)
			(void)ipa_stop(netdev);
		unregister_netdev(netdev);

		/* Disconnect the endpoints from the netdev before freeing */
		ipa->modem_netdev = NULL;
		priv->rx->netdev = NULL;
		priv->tx->netdev = NULL;

		free_netdev(netdev);
	}

	atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
	smp_mb__after_atomic();

	return 0;
}
/* Treat a "clean" modem stop the same as a crash.  Pause modem traffic,
 * reset hardware state the modem used, tear down the network device,
 * and zero the modem's memory regions in preparation for its next boot.
 * Individual failures are reported but don't abort the remaining steps.
 */
static void ipa_modem_crashed(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	int ret;

	/* Prevent the modem from triggering a call to ipa_setup() */
	ipa_smp2p_irq_disable_setup(ipa);

	/* The hardware must be powered for the cleanup that follows */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "error %d getting power to handle crash\n", ret);
		goto out_power_put;
	}

	/* Pause modem endpoints while resetting state */
	ipa_endpoint_modem_pause_all(ipa, true);

	ipa_endpoint_modem_hol_block_clear_all(ipa);

	ipa_table_reset(ipa, true);

	ret = ipa_table_hash_flush(ipa);
	if (ret)
		dev_err(dev, "error %d flushing hash caches\n", ret);

	ret = ipa_endpoint_modem_exception_reset_all(ipa);
	if (ret)
		dev_err(dev, "error %d resetting exception endpoint\n", ret);

	ipa_endpoint_modem_pause_all(ipa, false);

	ret = ipa_modem_stop(ipa);
	if (ret)
		dev_err(dev, "error %d stopping modem\n", ret);

	/* Now prepare for the next modem boot */
	ret = ipa_mem_zero_modem(ipa);
	if (ret)
		dev_err(dev, "error %d zeroing modem memory regions\n", ret);

out_power_put:
	pm_runtime_mark_last_busy(dev);
	(void)pm_runtime_put_autosuspend(dev);
}
/* Notifier callback for modem subsystem-restart (SSR) events.  Most
 * events are just logged; cleanup happens before modem shutdown (and
 * only if IPA setup has completed).
 */
static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	struct ipa *ipa = container_of(nb, struct ipa, nb);
	struct qcom_ssr_notify_data *notify_data = data;
	struct device *dev = ipa->dev;

	switch (action) {
	case QCOM_SSR_BEFORE_POWERUP:	/* Modem is about to boot */
		dev_info(dev, "received modem starting event\n");
		ipa_uc_power(ipa);
		ipa_smp2p_notify_reset(ipa);
		break;

	case QCOM_SSR_AFTER_POWERUP:	/* Modem has booted */
		dev_info(dev, "received modem running event\n");
		break;

	case QCOM_SSR_BEFORE_SHUTDOWN:	/* Stopping cleanly, or crashed */
		dev_info(dev, "received modem %s event\n",
			 notify_data->crashed ? "crashed" : "stopping");
		/* Nothing to clean up unless setup has completed */
		if (ipa->setup_complete)
			ipa_modem_crashed(ipa);
		break;

	case QCOM_SSR_AFTER_SHUTDOWN:	/* Modem is down */
		dev_info(dev, "received modem offline event\n");
		break;

	default:
		dev_err(dev, "received unrecognized event %lu\n", action);
		break;
	}

	return NOTIFY_OK;
}
  382. int ipa_modem_config(struct ipa *ipa)
  383. {
  384. void *notifier;
  385. ipa->nb.notifier_call = ipa_modem_notify;
  386. notifier = qcom_register_ssr_notifier("mpss", &ipa->nb);
  387. if (IS_ERR(notifier))
  388. return PTR_ERR(notifier);
  389. ipa->notifier = notifier;
  390. return 0;
  391. }
  392. void ipa_modem_deconfig(struct ipa *ipa)
  393. {
  394. struct device *dev = ipa->dev;
  395. int ret;
  396. ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
  397. if (ret)
  398. dev_err(dev, "error %d unregistering notifier", ret);
  399. ipa->notifier = NULL;
  400. memset(&ipa->nb, 0, sizeof(ipa->nb));
  401. }