  1. /*
  2. * Copyright (c) 2005-2011 Atheros Communications Inc.
  3. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  4. * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  5. *
  6. * Permission to use, copy, modify, and/or distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #ifndef _WMI_OPS_H_
  19. #define _WMI_OPS_H_
  20. struct ath10k;
  21. struct sk_buff;
/*
 * struct wmi_ops - dispatch table abstracting the WMI firmware ABI.
 *
 * ath10k firmware comes in several WMI flavours that differ in command IDs
 * and event layouts.  Each flavour fills in one instance of this table and
 * the inline wrappers below route through it, returning -EOPNOTSUPP when a
 * flavour leaves an op NULL.
 *
 * Naming convention (as demonstrated by the wrappers in this file):
 *   rx / map_svc* - event delivery and service-bitmap decoding
 *   pull_*        - parse a received event skb into an arg struct; return
 *                   0 or a negative errno
 *   gen_*         - build (but do not send) a command skb; return the skb
 *                   or an ERR_PTR() on failure
 */
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	/* Event parsers. */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	/* pull_phyerr takes a raw buffer, not an skb: one skb may carry
	 * several phyerr records parsed iteratively.
	 */
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_svc_avail_ev_arg *arg);

	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	/* Command builders. */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	/* restart=true reuses the same builder for the restart command. */
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
						 u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)(struct ath10k *ar,
					   const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	/* Renders firmware statistics into a preallocated text buffer. */
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)(struct ath10k *ar,
						      enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);
};
  215. int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
/* Hand a received WMI event skb to the ABI-specific dispatcher.
 * WARN_ON_ONCE: every flavour must implement ->rx; a miss is a driver bug.
 */
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

/* Decode the firmware service bitmap into the host service bitmap. */
static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

/* Decode the extended firmware service bitmap (TLV firmware). */
static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		       size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

/*
 * The ath10k_wmi_pull_*() wrappers below all follow the same contract:
 * parse one received event skb into the caller-supplied arg struct via the
 * flavour's parser, returning 0 on success, a negative errno on parse
 * failure, or -EOPNOTSUPP when the flavour has no parser for that event.
 */

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

/* Parses one phyerr record out of a raw buffer; left_len bounds the read. */
static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}
  378. static inline enum wmi_txbf_conf
  379. ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
  380. {
  381. if (!ar->wmi.ops->get_txbf_conf_scheme)
  382. return WMI_TXBF_CONF_UNSUPPORTED;
  383. return ar->wmi.ops->get_txbf_conf_scheme(ar);
  384. }
  385. static inline int
  386. ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
  387. {
  388. if (!ar->wmi.ops->cleanup_mgmt_tx_send)
  389. return -EOPNOTSUPP;
  390. return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
  391. }
  392. static inline int
  393. ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
  394. dma_addr_t paddr)
  395. {
  396. struct sk_buff *skb;
  397. int ret;
  398. if (!ar->wmi.ops->gen_mgmt_tx_send)
  399. return -EOPNOTSUPP;
  400. skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
  401. if (IS_ERR(skb))
  402. return PTR_ERR(skb);
  403. ret = ath10k_wmi_cmd_send(ar, skb,
  404. ar->wmi.cmd->mgmt_tx_send_cmdid);
  405. if (ret)
  406. return ret;
  407. return 0;
  408. }
/*
 * Transmit a management frame by copy via the WMI management-tx command.
 * On a successful send the frame is immediately reported to mac80211 as
 * ACKed — see the FIXME below; firmware provides no real ACK event for
 * this path, so the status is synthesized.
 */
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}
/*
 * The wrappers below share one pattern: build the flavour-specific command
 * skb with the gen_* op, then hand it to ath10k_wmi_cmd_send() with the
 * flavour's command ID.  Each returns 0 on success, -EOPNOTSUPP when the
 * flavour lacks the op, or a negative errno from building/sending.
 */

/* Program the regulatory domain (2/5 GHz reg and conformance-test codes). */
static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

/* Ask the target to suspend; suspend_opt selects the suspend flavour. */
static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

/* Resume a previously suspended target. */
static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

/* Set a single pdev-scope firmware parameter. */
static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

/* Send the WMI INIT command that finalizes firmware resource config. */
static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

/* Start a firmware-driven channel scan described by arg. */
static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

/* Abort/stop a running scan as described by arg. */
static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
/* Create a virtual device (vdev) of the given type/subtype and MAC. */
static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

/* Destroy a previously created vdev. */
static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

/* Start a vdev on its configured channel (restart=false variant). */
static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

/* Restart a running vdev; same builder as start but with restart=true and
 * the restart command ID (used e.g. for channel switches).
 */
static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

/* Stop a running vdev. */
static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

/* Bring a vdev up (associated/beaconing) with the given AID and BSSID. */
static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

/* Bring a vdev down without stopping it. */
static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

/* Set a single per-vdev firmware parameter. */
static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

/* Install an encryption key on a vdev. */
static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

/* Configure spectral-scan parameters for a vdev. */
static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

/* Enable/trigger spectral scanning on a vdev. */
static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

/* Configure U-APSD auto-trigger parameters for a peer on this vdev. */
static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
  669. static inline int
  670. ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
  671. const struct wmi_wmm_params_all_arg *arg)
  672. {
  673. struct sk_buff *skb;
  674. u32 cmd_id;
  675. skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
  676. if (IS_ERR(skb))
  677. return PTR_ERR(skb);
  678. cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
  679. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  680. }
/* Create a firmware peer entry for peer_addr on the given vdev. */
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

/* Remove a firmware peer entry. */
static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

/* Flush queued frames for the TIDs in tid_bitmap for this peer. */
static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

/* Set a single per-peer firmware parameter. */
static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}
  731. static inline int
  732. ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
  733. enum wmi_sta_ps_mode psmode)
  734. {
  735. struct sk_buff *skb;
  736. if (!ar->wmi.ops->gen_set_psmode)
  737. return -EOPNOTSUPP;
  738. skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
  739. if (IS_ERR(skb))
  740. return PTR_ERR(skb);
  741. return ath10k_wmi_cmd_send(ar, skb,
  742. ar->wmi.cmd->sta_powersave_mode_cmdid);
  743. }
  744. static inline int
  745. ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
  746. enum wmi_sta_powersave_param param_id, u32 value)
  747. {
  748. struct sk_buff *skb;
  749. if (!ar->wmi.ops->gen_set_sta_ps)
  750. return -EOPNOTSUPP;
  751. skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
  752. if (IS_ERR(skb))
  753. return PTR_ERR(skb);
  754. return ath10k_wmi_cmd_send(ar, skb,
  755. ar->wmi.cmd->sta_powersave_param_cmdid);
  756. }
  757. static inline int
  758. ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  759. enum wmi_ap_ps_peer_param param_id, u32 value)
  760. {
  761. struct sk_buff *skb;
  762. if (!ar->wmi.ops->gen_set_ap_ps)
  763. return -EOPNOTSUPP;
  764. skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
  765. if (IS_ERR(skb))
  766. return PTR_ERR(skb);
  767. return ath10k_wmi_cmd_send(ar, skb,
  768. ar->wmi.cmd->ap_ps_peer_param_cmdid);
  769. }
  770. static inline int
  771. ath10k_wmi_scan_chan_list(struct ath10k *ar,
  772. const struct wmi_scan_chan_list_arg *arg)
  773. {
  774. struct sk_buff *skb;
  775. if (!ar->wmi.ops->gen_scan_chan_list)
  776. return -EOPNOTSUPP;
  777. skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
  778. if (IS_ERR(skb))
  779. return PTR_ERR(skb);
  780. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
  781. }
  782. static inline int
  783. ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
  784. {
  785. struct sk_buff *skb;
  786. u32 prob_req_oui;
  787. prob_req_oui = (((u32)mac_addr[0]) << 16) |
  788. (((u32)mac_addr[1]) << 8) | mac_addr[2];
  789. if (!ar->wmi.ops->gen_scan_prob_req_oui)
  790. return -EOPNOTSUPP;
  791. skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
  792. if (IS_ERR(skb))
  793. return PTR_ERR(skb);
  794. return ath10k_wmi_cmd_send(ar, skb,
  795. ar->wmi.cmd->scan_prob_req_oui_cmdid);
  796. }
  797. static inline int
  798. ath10k_wmi_peer_assoc(struct ath10k *ar,
  799. const struct wmi_peer_assoc_complete_arg *arg)
  800. {
  801. struct sk_buff *skb;
  802. if (!ar->wmi.ops->gen_peer_assoc)
  803. return -EOPNOTSUPP;
  804. skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
  805. if (IS_ERR(skb))
  806. return PTR_ERR(skb);
  807. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
  808. }
  809. static inline int
  810. ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
  811. const void *bcn, size_t bcn_len,
  812. u32 bcn_paddr, bool dtim_zero,
  813. bool deliver_cab)
  814. {
  815. struct sk_buff *skb;
  816. int ret;
  817. if (!ar->wmi.ops->gen_beacon_dma)
  818. return -EOPNOTSUPP;
  819. skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
  820. dtim_zero, deliver_cab);
  821. if (IS_ERR(skb))
  822. return PTR_ERR(skb);
  823. ret = ath10k_wmi_cmd_send_nowait(ar, skb,
  824. ar->wmi.cmd->pdev_send_bcn_cmdid);
  825. if (ret) {
  826. dev_kfree_skb(skb);
  827. return ret;
  828. }
  829. return 0;
  830. }
  831. static inline int
  832. ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
  833. const struct wmi_wmm_params_all_arg *arg)
  834. {
  835. struct sk_buff *skb;
  836. if (!ar->wmi.ops->gen_pdev_set_wmm)
  837. return -EOPNOTSUPP;
  838. skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
  839. if (IS_ERR(skb))
  840. return PTR_ERR(skb);
  841. return ath10k_wmi_cmd_send(ar, skb,
  842. ar->wmi.cmd->pdev_set_wmm_params_cmdid);
  843. }
  844. static inline int
  845. ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
  846. {
  847. struct sk_buff *skb;
  848. if (!ar->wmi.ops->gen_request_stats)
  849. return -EOPNOTSUPP;
  850. skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
  851. if (IS_ERR(skb))
  852. return PTR_ERR(skb);
  853. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
  854. }
  855. static inline int
  856. ath10k_wmi_force_fw_hang(struct ath10k *ar,
  857. enum wmi_force_fw_hang_type type, u32 delay_ms)
  858. {
  859. struct sk_buff *skb;
  860. if (!ar->wmi.ops->gen_force_fw_hang)
  861. return -EOPNOTSUPP;
  862. skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
  863. if (IS_ERR(skb))
  864. return PTR_ERR(skb);
  865. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
  866. }
  867. static inline int
  868. ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
  869. {
  870. struct sk_buff *skb;
  871. if (!ar->wmi.ops->gen_dbglog_cfg)
  872. return -EOPNOTSUPP;
  873. skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
  874. if (IS_ERR(skb))
  875. return PTR_ERR(skb);
  876. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
  877. }
  878. static inline int
  879. ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
  880. {
  881. struct sk_buff *skb;
  882. if (!ar->wmi.ops->gen_pktlog_enable)
  883. return -EOPNOTSUPP;
  884. skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
  885. if (IS_ERR(skb))
  886. return PTR_ERR(skb);
  887. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
  888. }
  889. static inline int
  890. ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
  891. {
  892. struct sk_buff *skb;
  893. if (!ar->wmi.ops->gen_pktlog_disable)
  894. return -EOPNOTSUPP;
  895. skb = ar->wmi.ops->gen_pktlog_disable(ar);
  896. if (IS_ERR(skb))
  897. return PTR_ERR(skb);
  898. return ath10k_wmi_cmd_send(ar, skb,
  899. ar->wmi.cmd->pdev_pktlog_disable_cmdid);
  900. }
  901. static inline int
  902. ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
  903. u32 next_offset, u32 enabled)
  904. {
  905. struct sk_buff *skb;
  906. if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
  907. return -EOPNOTSUPP;
  908. skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
  909. next_offset, enabled);
  910. if (IS_ERR(skb))
  911. return PTR_ERR(skb);
  912. return ath10k_wmi_cmd_send(ar, skb,
  913. ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
  914. }
  915. static inline int
  916. ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
  917. {
  918. struct sk_buff *skb;
  919. if (!ar->wmi.ops->gen_pdev_get_temperature)
  920. return -EOPNOTSUPP;
  921. skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
  922. if (IS_ERR(skb))
  923. return PTR_ERR(skb);
  924. return ath10k_wmi_cmd_send(ar, skb,
  925. ar->wmi.cmd->pdev_get_temperature_cmdid);
  926. }
  927. static inline int
  928. ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
  929. {
  930. struct sk_buff *skb;
  931. if (!ar->wmi.ops->gen_addba_clear_resp)
  932. return -EOPNOTSUPP;
  933. skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
  934. if (IS_ERR(skb))
  935. return PTR_ERR(skb);
  936. return ath10k_wmi_cmd_send(ar, skb,
  937. ar->wmi.cmd->addba_clear_resp_cmdid);
  938. }
  939. static inline int
  940. ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  941. u32 tid, u32 buf_size)
  942. {
  943. struct sk_buff *skb;
  944. if (!ar->wmi.ops->gen_addba_send)
  945. return -EOPNOTSUPP;
  946. skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
  947. if (IS_ERR(skb))
  948. return PTR_ERR(skb);
  949. return ath10k_wmi_cmd_send(ar, skb,
  950. ar->wmi.cmd->addba_send_cmdid);
  951. }
  952. static inline int
  953. ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  954. u32 tid, u32 status)
  955. {
  956. struct sk_buff *skb;
  957. if (!ar->wmi.ops->gen_addba_set_resp)
  958. return -EOPNOTSUPP;
  959. skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
  960. if (IS_ERR(skb))
  961. return PTR_ERR(skb);
  962. return ath10k_wmi_cmd_send(ar, skb,
  963. ar->wmi.cmd->addba_set_resp_cmdid);
  964. }
  965. static inline int
  966. ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  967. u32 tid, u32 initiator, u32 reason)
  968. {
  969. struct sk_buff *skb;
  970. if (!ar->wmi.ops->gen_delba_send)
  971. return -EOPNOTSUPP;
  972. skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
  973. reason);
  974. if (IS_ERR(skb))
  975. return PTR_ERR(skb);
  976. return ath10k_wmi_cmd_send(ar, skb,
  977. ar->wmi.cmd->delba_send_cmdid);
  978. }
  979. static inline int
  980. ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
  981. struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
  982. void *prb_ies, size_t prb_ies_len)
  983. {
  984. struct sk_buff *skb;
  985. if (!ar->wmi.ops->gen_bcn_tmpl)
  986. return -EOPNOTSUPP;
  987. skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
  988. prb_caps, prb_erp, prb_ies,
  989. prb_ies_len);
  990. if (IS_ERR(skb))
  991. return PTR_ERR(skb);
  992. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
  993. }
  994. static inline int
  995. ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
  996. {
  997. struct sk_buff *skb;
  998. if (!ar->wmi.ops->gen_prb_tmpl)
  999. return -EOPNOTSUPP;
  1000. skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
  1001. if (IS_ERR(skb))
  1002. return PTR_ERR(skb);
  1003. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
  1004. }
  1005. static inline int
  1006. ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
  1007. {
  1008. struct sk_buff *skb;
  1009. if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
  1010. return -EOPNOTSUPP;
  1011. skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
  1012. if (IS_ERR(skb))
  1013. return PTR_ERR(skb);
  1014. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
  1015. }
  1016. static inline int
  1017. ath10k_wmi_sta_keepalive(struct ath10k *ar,
  1018. const struct wmi_sta_keepalive_arg *arg)
  1019. {
  1020. struct sk_buff *skb;
  1021. u32 cmd_id;
  1022. if (!ar->wmi.ops->gen_sta_keepalive)
  1023. return -EOPNOTSUPP;
  1024. skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
  1025. if (IS_ERR(skb))
  1026. return PTR_ERR(skb);
  1027. cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
  1028. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  1029. }
  1030. static inline int
  1031. ath10k_wmi_wow_enable(struct ath10k *ar)
  1032. {
  1033. struct sk_buff *skb;
  1034. u32 cmd_id;
  1035. if (!ar->wmi.ops->gen_wow_enable)
  1036. return -EOPNOTSUPP;
  1037. skb = ar->wmi.ops->gen_wow_enable(ar);
  1038. if (IS_ERR(skb))
  1039. return PTR_ERR(skb);
  1040. cmd_id = ar->wmi.cmd->wow_enable_cmdid;
  1041. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  1042. }
  1043. static inline int
  1044. ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
  1045. enum wmi_wow_wakeup_event event,
  1046. u32 enable)
  1047. {
  1048. struct sk_buff *skb;
  1049. u32 cmd_id;
  1050. if (!ar->wmi.ops->gen_wow_add_wakeup_event)
  1051. return -EOPNOTSUPP;
  1052. skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
  1053. if (IS_ERR(skb))
  1054. return PTR_ERR(skb);
  1055. cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
  1056. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  1057. }
  1058. static inline int
  1059. ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
  1060. {
  1061. struct sk_buff *skb;
  1062. u32 cmd_id;
  1063. if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
  1064. return -EOPNOTSUPP;
  1065. skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
  1066. if (IS_ERR(skb))
  1067. return PTR_ERR(skb);
  1068. cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
  1069. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  1070. }
  1071. static inline int
  1072. ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
  1073. const u8 *pattern, const u8 *mask,
  1074. int pattern_len, int pattern_offset)
  1075. {
  1076. struct sk_buff *skb;
  1077. u32 cmd_id;
  1078. if (!ar->wmi.ops->gen_wow_add_pattern)
  1079. return -EOPNOTSUPP;
  1080. skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
  1081. pattern, mask, pattern_len,
  1082. pattern_offset);
  1083. if (IS_ERR(skb))
  1084. return PTR_ERR(skb);
  1085. cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
  1086. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  1087. }
  1088. static inline int
  1089. ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
  1090. {
  1091. struct sk_buff *skb;
  1092. u32 cmd_id;
  1093. if (!ar->wmi.ops->gen_wow_del_pattern)
  1094. return -EOPNOTSUPP;
  1095. skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
  1096. if (IS_ERR(skb))
  1097. return PTR_ERR(skb);
  1098. cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
  1099. return ath10k_wmi_cmd_send(ar, skb, cmd_id);
  1100. }
  1101. static inline int
  1102. ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
  1103. enum wmi_tdls_state state)
  1104. {
  1105. struct sk_buff *skb;
  1106. if (!ar->wmi.ops->gen_update_fw_tdls_state)
  1107. return -EOPNOTSUPP;
  1108. skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
  1109. if (IS_ERR(skb))
  1110. return PTR_ERR(skb);
  1111. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
  1112. }
  1113. static inline int
  1114. ath10k_wmi_tdls_peer_update(struct ath10k *ar,
  1115. const struct wmi_tdls_peer_update_cmd_arg *arg,
  1116. const struct wmi_tdls_peer_capab_arg *cap,
  1117. const struct wmi_channel_arg *chan)
  1118. {
  1119. struct sk_buff *skb;
  1120. if (!ar->wmi.ops->gen_tdls_peer_update)
  1121. return -EOPNOTSUPP;
  1122. skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
  1123. if (IS_ERR(skb))
  1124. return PTR_ERR(skb);
  1125. return ath10k_wmi_cmd_send(ar, skb,
  1126. ar->wmi.cmd->tdls_peer_update_cmdid);
  1127. }
  1128. static inline int
  1129. ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
  1130. {
  1131. struct sk_buff *skb;
  1132. if (!ar->wmi.ops->gen_adaptive_qcs)
  1133. return -EOPNOTSUPP;
  1134. skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
  1135. if (IS_ERR(skb))
  1136. return PTR_ERR(skb);
  1137. return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
  1138. }
  1139. static inline int
  1140. ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
  1141. {
  1142. struct sk_buff *skb;
  1143. if (!ar->wmi.ops->gen_pdev_get_tpc_config)
  1144. return -EOPNOTSUPP;
  1145. skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
  1146. if (IS_ERR(skb))
  1147. return PTR_ERR(skb);
  1148. return ath10k_wmi_cmd_send(ar, skb,
  1149. ar->wmi.cmd->pdev_get_tpc_config_cmdid);
  1150. }
  1151. static inline int
  1152. ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
  1153. char *buf)
  1154. {
  1155. if (!ar->wmi.ops->fw_stats_fill)
  1156. return -EOPNOTSUPP;
  1157. ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
  1158. return 0;
  1159. }
  1160. static inline int
  1161. ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
  1162. u32 detect_level, u32 detect_margin)
  1163. {
  1164. struct sk_buff *skb;
  1165. if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
  1166. return -EOPNOTSUPP;
  1167. skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
  1168. detect_level,
  1169. detect_margin);
  1170. if (IS_ERR(skb))
  1171. return PTR_ERR(skb);
  1172. return ath10k_wmi_cmd_send(ar, skb,
  1173. ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
  1174. }
  1175. static inline int
  1176. ath10k_wmi_ext_resource_config(struct ath10k *ar,
  1177. enum wmi_host_platform_type type,
  1178. u32 fw_feature_bitmap)
  1179. {
  1180. struct sk_buff *skb;
  1181. if (!ar->wmi.ops->ext_resource_config)
  1182. return -EOPNOTSUPP;
  1183. skb = ar->wmi.ops->ext_resource_config(ar, type,
  1184. fw_feature_bitmap);
  1185. if (IS_ERR(skb))
  1186. return PTR_ERR(skb);
  1187. return ath10k_wmi_cmd_send(ar, skb,
  1188. ar->wmi.cmd->ext_resource_cfg_cmdid);
  1189. }
  1190. static inline int
  1191. ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
  1192. {
  1193. if (!ar->wmi.ops->get_vdev_subtype)
  1194. return -EOPNOTSUPP;
  1195. return ar->wmi.ops->get_vdev_subtype(ar, subtype);
  1196. }
  1197. static inline int
  1198. ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
  1199. enum wmi_bss_survey_req_type type)
  1200. {
  1201. struct ath10k_wmi *wmi = &ar->wmi;
  1202. struct sk_buff *skb;
  1203. if (!wmi->ops->gen_pdev_bss_chan_info_req)
  1204. return -EOPNOTSUPP;
  1205. skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
  1206. if (IS_ERR(skb))
  1207. return PTR_ERR(skb);
  1208. return ath10k_wmi_cmd_send(ar, skb,
  1209. wmi->cmd->pdev_bss_chan_info_request_cmdid);
  1210. }
  1211. static inline int
  1212. ath10k_wmi_echo(struct ath10k *ar, u32 value)
  1213. {
  1214. struct ath10k_wmi *wmi = &ar->wmi;
  1215. struct sk_buff *skb;
  1216. if (!wmi->ops->gen_echo)
  1217. return -EOPNOTSUPP;
  1218. skb = wmi->ops->gen_echo(ar, value);
  1219. if (IS_ERR(skb))
  1220. return PTR_ERR(skb);
  1221. return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
  1222. }
  1223. static inline int
  1224. ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
  1225. {
  1226. struct sk_buff *skb;
  1227. if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
  1228. return -EOPNOTSUPP;
  1229. skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
  1230. if (IS_ERR(skb))
  1231. return PTR_ERR(skb);
  1232. return ath10k_wmi_cmd_send(ar, skb,
  1233. ar->wmi.cmd->pdev_get_tpc_table_cmdid);
  1234. }
  1235. static inline int
  1236. ath10k_wmi_report_radar_found(struct ath10k *ar,
  1237. const struct ath10k_radar_found_info *arg)
  1238. {
  1239. struct sk_buff *skb;
  1240. if (!ar->wmi.ops->gen_radar_found)
  1241. return -EOPNOTSUPP;
  1242. skb = ar->wmi.ops->gen_radar_found(ar, arg);
  1243. if (IS_ERR(skb))
  1244. return PTR_ERR(skb);
  1245. return ath10k_wmi_cmd_send(ar, skb,
  1246. ar->wmi.cmd->radar_found_cmdid);
  1247. }
  1248. #endif