dpll_netlink.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic netlink for DPLL management framework
 *
 * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
 * Copyright (c) 2023 Intel and affiliates
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/genetlink.h>
#include "dpll_core.h"
#include "dpll_netlink.h"
#include "dpll_nl.h"
#include <uapi/linux/dpll.h>

#define ASSERT_NOT_NULL(ptr)	(WARN_ON(!ptr))

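/*
 * Like xa_for_each_marked(), but begin the iteration at index @start; the
 * dump handlers below use it to resume from the index saved in their
 * struct dpll_dump_ctx.
 */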
#define xa_for_each_marked_start(xa, index, entry, filter, start) \
	for (index = start, entry = xa_find(xa, &index, ULONG_MAX, filter); \
	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))

struct dpll_dump_ctx {
	unsigned long idx;
};

static struct dpll_dump_ctx *dpll_dump_context(struct netlink_callback *cb)
{
	return (struct dpll_dump_ctx *)cb->ctx;
}

static int
dpll_msg_add_dev_handle(struct sk_buff *msg, struct dpll_device *dpll)
{
	if (nla_put_u32(msg, DPLL_A_ID, dpll->id))
		return -EMSGSIZE;
	return 0;
}

static int
dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
{
	if (nla_put_u32(msg, DPLL_A_PIN_PARENT_ID, id))
		return -EMSGSIZE;
	return 0;
}

/**
 * dpll_msg_add_pin_handle - attach pin handle attribute to a given message
 * @msg: pointer to sk_buff message to attach a pin handle
 * @pin: pin pointer
 *
 * Return:
 * * 0 - success
 * * -EMSGSIZE - no space in message to attach pin handle
 */
static int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
{
	if (!pin)
		return 0;
	if (nla_put_u32(msg, DPLL_A_PIN_ID, pin->id))
		return -EMSGSIZE;
	return 0;
}

static struct dpll_pin *dpll_netdev_pin(const struct net_device *dev)
{
	return rcu_dereference_rtnl(dev->dpll_pin);
}

/**
 * dpll_netdev_pin_handle_size - get size of pin handle attribute of a netdev
 * @dev: netdev from which to get the pin
 *
 * Return: byte size of pin handle attribute, or 0 if @dev has no pin.
 */
size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
{
	return dpll_netdev_pin(dev) ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
}

int dpll_netdev_add_pin_handle(struct sk_buff *msg,
			       const struct net_device *dev)
{
	return dpll_msg_add_pin_handle(msg, dpll_netdev_pin(dev));
}

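/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * a netdev message builder outside the dpll subsystem is expected to account
 * for dpll_netdev_pin_handle_size() when sizing its skb and then emit the
 * handle with dpll_netdev_add_pin_handle(), e.g.:
 *
 *	size += dpll_netdev_pin_handle_size(dev);
 *	...
 *	if (dpll_netdev_add_pin_handle(skb, dev))
 *		return -EMSGSIZE;
 */
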
static int
dpll_msg_add_mode(struct sk_buff *msg, struct dpll_device *dpll,
		  struct netlink_ext_ack *extack)
{
	const struct dpll_device_ops *ops = dpll_device_ops(dpll);
	enum dpll_mode mode;
	int ret;
	ret = ops->mode_get(dpll, dpll_priv(dpll), &mode, extack);
	if (ret)
		return ret;
	if (nla_put_u32(msg, DPLL_A_MODE, mode))
		return -EMSGSIZE;
	return 0;
}

static int
dpll_msg_add_mode_supported(struct sk_buff *msg, struct dpll_device *dpll,
			    struct netlink_ext_ack *extack)
{
	const struct dpll_device_ops *ops = dpll_device_ops(dpll);
	enum dpll_mode mode;
	int ret;
	/* No mode change is supported now, so the only supported mode is the
	 * one obtained by mode_get().
	 */
	ret = ops->mode_get(dpll, dpll_priv(dpll), &mode, extack);
	if (ret)
		return ret;
	if (nla_put_u32(msg, DPLL_A_MODE_SUPPORTED, mode))
		return -EMSGSIZE;
	return 0;
}

static int
dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
			 struct netlink_ext_ack *extack)
{
	const struct dpll_device_ops *ops = dpll_device_ops(dpll);
	enum dpll_lock_status_error status_error = 0;
	enum dpll_lock_status status;
	int ret;
	ret = ops->lock_status_get(dpll, dpll_priv(dpll), &status,
				   &status_error, extack);
	if (ret)
		return ret;
	if (nla_put_u32(msg, DPLL_A_LOCK_STATUS, status))
		return -EMSGSIZE;
	if (status_error &&
	    (status == DPLL_LOCK_STATUS_UNLOCKED ||
	     status == DPLL_LOCK_STATUS_HOLDOVER) &&
	    nla_put_u32(msg, DPLL_A_LOCK_STATUS_ERROR, status_error))
		return -EMSGSIZE;
	return 0;
}

static int
dpll_msg_add_temp(struct sk_buff *msg, struct dpll_device *dpll,
		  struct netlink_ext_ack *extack)
{
	const struct dpll_device_ops *ops = dpll_device_ops(dpll);
	s32 temp;
	int ret;
	if (!ops->temp_get)
		return 0;
	ret = ops->temp_get(dpll, dpll_priv(dpll), &temp, extack);
	if (ret)
		return ret;
	if (nla_put_s32(msg, DPLL_A_TEMP, temp))
		return -EMSGSIZE;
	return 0;
}

static int
dpll_msg_add_pin_prio(struct sk_buff *msg, struct dpll_pin *pin,
		      struct dpll_pin_ref *ref,
		      struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
	struct dpll_device *dpll = ref->dpll;
	u32 prio;
	int ret;
	if (!ops->prio_get)
		return 0;
	ret = ops->prio_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
			    dpll_priv(dpll), &prio, extack);
	if (ret)
		return ret;
	if (nla_put_u32(msg, DPLL_A_PIN_PRIO, prio))
		return -EMSGSIZE;
	return 0;
}

static int
dpll_msg_add_pin_on_dpll_state(struct sk_buff *msg, struct dpll_pin *pin,
			       struct dpll_pin_ref *ref,
			       struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
	struct dpll_device *dpll = ref->dpll;
	enum dpll_pin_state state;
	int ret;
	if (!ops->state_on_dpll_get)
		return 0;
	ret = ops->state_on_dpll_get(pin, dpll_pin_on_dpll_priv(dpll, pin),
				     dpll, dpll_priv(dpll), &state, extack);
	if (ret)
		return ret;
	if (nla_put_u32(msg, DPLL_A_PIN_STATE, state))
		return -EMSGSIZE;
	return 0;
}

static int
dpll_msg_add_pin_direction(struct sk_buff *msg, struct dpll_pin *pin,
			   struct dpll_pin_ref *ref,
			   struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
	struct dpll_device *dpll = ref->dpll;
	enum dpll_pin_direction direction;
	int ret;
	ret = ops->direction_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
				 dpll_priv(dpll), &direction, extack);
	if (ret)
		return ret;
	if (nla_put_u32(msg, DPLL_A_PIN_DIRECTION, direction))
		return -EMSGSIZE;
	return 0;
}

static int
dpll_msg_add_pin_phase_adjust(struct sk_buff *msg, struct dpll_pin *pin,
			      struct dpll_pin_ref *ref,
			      struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
	struct dpll_device *dpll = ref->dpll;
	s32 phase_adjust;
	int ret;
	if (!ops->phase_adjust_get)
		return 0;
	ret = ops->phase_adjust_get(pin, dpll_pin_on_dpll_priv(dpll, pin),
				    dpll, dpll_priv(dpll),
				    &phase_adjust, extack);
	if (ret)
		return ret;
	if (nla_put_s32(msg, DPLL_A_PIN_PHASE_ADJUST, phase_adjust))
		return -EMSGSIZE;
	return 0;
}

static int
dpll_msg_add_phase_offset(struct sk_buff *msg, struct dpll_pin *pin,
			  struct dpll_pin_ref *ref,
			  struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
	struct dpll_device *dpll = ref->dpll;
	s64 phase_offset;
	int ret;
	if (!ops->phase_offset_get)
		return 0;
	ret = ops->phase_offset_get(pin, dpll_pin_on_dpll_priv(dpll, pin),
				    dpll, dpll_priv(dpll), &phase_offset,
				    extack);
	if (ret)
		return ret;
	if (nla_put_64bit(msg, DPLL_A_PIN_PHASE_OFFSET, sizeof(phase_offset),
			  &phase_offset, DPLL_A_PIN_PAD))
		return -EMSGSIZE;
	return 0;
}

static int dpll_msg_add_ffo(struct sk_buff *msg, struct dpll_pin *pin,
			    struct dpll_pin_ref *ref,
			    struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
	struct dpll_device *dpll = ref->dpll;
	s64 ffo;
	int ret;
	if (!ops->ffo_get)
		return 0;
	ret = ops->ffo_get(pin, dpll_pin_on_dpll_priv(dpll, pin),
			   dpll, dpll_priv(dpll), &ffo, extack);
	if (ret) {
		if (ret == -ENODATA)
			return 0;
		return ret;
	}
	return nla_put_sint(msg, DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET, ffo);
}

static int
dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
		      struct dpll_pin_ref *ref, struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
	struct dpll_device *dpll = ref->dpll;
	struct nlattr *nest;
	int fs, ret;
	u64 freq;
	if (!ops->frequency_get)
		return 0;
	ret = ops->frequency_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
				 dpll_priv(dpll), &freq, extack);
	if (ret)
		return ret;
	if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq,
			  DPLL_A_PIN_PAD))
		return -EMSGSIZE;
	for (fs = 0; fs < pin->prop.freq_supported_num; fs++) {
		nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED);
		if (!nest)
			return -EMSGSIZE;
		freq = pin->prop.freq_supported[fs].min;
		if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq),
				  &freq, DPLL_A_PIN_PAD)) {
			nla_nest_cancel(msg, nest);
			return -EMSGSIZE;
		}
		freq = pin->prop.freq_supported[fs].max;
		if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq),
				  &freq, DPLL_A_PIN_PAD)) {
			nla_nest_cancel(msg, nest);
			return -EMSGSIZE;
		}
		nla_nest_end(msg, nest);
	}
	return 0;
}

static int
dpll_msg_add_pin_esync(struct sk_buff *msg, struct dpll_pin *pin,
		       struct dpll_pin_ref *ref, struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
	struct dpll_device *dpll = ref->dpll;
	struct dpll_pin_esync esync;
	struct nlattr *nest;
	int ret, i;
	if (!ops->esync_get)
		return 0;
	ret = ops->esync_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
			     dpll_priv(dpll), &esync, extack);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;
	if (nla_put_64bit(msg, DPLL_A_PIN_ESYNC_FREQUENCY, sizeof(esync.freq),
			  &esync.freq, DPLL_A_PIN_PAD))
		return -EMSGSIZE;
	if (nla_put_u32(msg, DPLL_A_PIN_ESYNC_PULSE, esync.pulse))
		return -EMSGSIZE;
	for (i = 0; i < esync.range_num; i++) {
		nest = nla_nest_start(msg,
				      DPLL_A_PIN_ESYNC_FREQUENCY_SUPPORTED);
		if (!nest)
			return -EMSGSIZE;
		if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN,
				  sizeof(esync.range[i].min),
				  &esync.range[i].min, DPLL_A_PIN_PAD))
			goto nest_cancel;
		if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX,
				  sizeof(esync.range[i].max),
				  &esync.range[i].max, DPLL_A_PIN_PAD))
			goto nest_cancel;
		nla_nest_end(msg, nest);
	}
	return 0;
nest_cancel:
	nla_nest_cancel(msg, nest);
	return -EMSGSIZE;
}

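/* Check whether @freq falls within any of the pin's supported ranges. */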
static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
{
	int fs;
	for (fs = 0; fs < pin->prop.freq_supported_num; fs++)
		if (freq >= pin->prop.freq_supported[fs].min &&
		    freq <= pin->prop.freq_supported[fs].max)
			return true;
	return false;
}

static int
dpll_msg_add_pin_parents(struct sk_buff *msg, struct dpll_pin *pin,
			 struct dpll_pin_ref *dpll_ref,
			 struct netlink_ext_ack *extack)
{
	enum dpll_pin_state state;
	struct dpll_pin_ref *ref;
	struct dpll_pin *ppin;
	struct nlattr *nest;
	unsigned long index;
	int ret;
	xa_for_each(&pin->parent_refs, index, ref) {
		const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
		void *parent_priv;
		ppin = ref->pin;
		parent_priv = dpll_pin_on_dpll_priv(dpll_ref->dpll, ppin);
		ret = ops->state_on_pin_get(pin,
					    dpll_pin_on_pin_priv(ppin, pin),
					    ppin, parent_priv, &state, extack);
		if (ret)
			return ret;
		nest = nla_nest_start(msg, DPLL_A_PIN_PARENT_PIN);
		if (!nest)
			return -EMSGSIZE;
		ret = dpll_msg_add_dev_parent_handle(msg, ppin->id);
		if (ret)
			goto nest_cancel;
		if (nla_put_u32(msg, DPLL_A_PIN_STATE, state)) {
			ret = -EMSGSIZE;
			goto nest_cancel;
		}
		nla_nest_end(msg, nest);
	}
	return 0;
nest_cancel:
	nla_nest_cancel(msg, nest);
	return ret;
}

static int
dpll_msg_add_pin_dplls(struct sk_buff *msg, struct dpll_pin *pin,
		       struct netlink_ext_ack *extack)
{
	struct dpll_pin_ref *ref;
	struct nlattr *attr;
	unsigned long index;
	int ret;
	xa_for_each(&pin->dpll_refs, index, ref) {
		attr = nla_nest_start(msg, DPLL_A_PIN_PARENT_DEVICE);
		if (!attr)
			return -EMSGSIZE;
		ret = dpll_msg_add_dev_parent_handle(msg, ref->dpll->id);
		if (ret)
			goto nest_cancel;
		ret = dpll_msg_add_pin_on_dpll_state(msg, pin, ref, extack);
		if (ret)
			goto nest_cancel;
		ret = dpll_msg_add_pin_prio(msg, pin, ref, extack);
		if (ret)
			goto nest_cancel;
		ret = dpll_msg_add_pin_direction(msg, pin, ref, extack);
		if (ret)
			goto nest_cancel;
		ret = dpll_msg_add_phase_offset(msg, pin, ref, extack);
		if (ret)
			goto nest_cancel;
		nla_nest_end(msg, attr);
	}
	return 0;
nest_cancel:
	nla_nest_cancel(msg, attr);
	return ret;
}

static int
dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
		     struct netlink_ext_ack *extack)
{
	const struct dpll_pin_properties *prop = &pin->prop;
	struct dpll_pin_ref *ref;
	int ret;
	ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);
	ASSERT_NOT_NULL(ref);
	ret = dpll_msg_add_pin_handle(msg, pin);
	if (ret)
		return ret;
	if (nla_put_string(msg, DPLL_A_PIN_MODULE_NAME,
			   module_name(pin->module)))
		return -EMSGSIZE;
	if (nla_put_64bit(msg, DPLL_A_PIN_CLOCK_ID, sizeof(pin->clock_id),
			  &pin->clock_id, DPLL_A_PIN_PAD))
		return -EMSGSIZE;
	if (prop->board_label &&
	    nla_put_string(msg, DPLL_A_PIN_BOARD_LABEL, prop->board_label))
		return -EMSGSIZE;
	if (prop->panel_label &&
	    nla_put_string(msg, DPLL_A_PIN_PANEL_LABEL, prop->panel_label))
		return -EMSGSIZE;
	if (prop->package_label &&
	    nla_put_string(msg, DPLL_A_PIN_PACKAGE_LABEL,
			   prop->package_label))
		return -EMSGSIZE;
	if (nla_put_u32(msg, DPLL_A_PIN_TYPE, prop->type))
		return -EMSGSIZE;
	if (nla_put_u32(msg, DPLL_A_PIN_CAPABILITIES, prop->capabilities))
		return -EMSGSIZE;
	ret = dpll_msg_add_pin_freq(msg, pin, ref, extack);
	if (ret)
		return ret;
	if (nla_put_s32(msg, DPLL_A_PIN_PHASE_ADJUST_MIN,
			prop->phase_range.min))
		return -EMSGSIZE;
	if (nla_put_s32(msg, DPLL_A_PIN_PHASE_ADJUST_MAX,
			prop->phase_range.max))
		return -EMSGSIZE;
	ret = dpll_msg_add_pin_phase_adjust(msg, pin, ref, extack);
	if (ret)
		return ret;
	ret = dpll_msg_add_ffo(msg, pin, ref, extack);
	if (ret)
		return ret;
	ret = dpll_msg_add_pin_esync(msg, pin, ref, extack);
	if (ret)
		return ret;
	if (xa_empty(&pin->parent_refs))
		ret = dpll_msg_add_pin_dplls(msg, pin, extack);
	else
		ret = dpll_msg_add_pin_parents(msg, pin, ref, extack);
	return ret;
}

static int
dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
		    struct netlink_ext_ack *extack)
{
	int ret;
	ret = dpll_msg_add_dev_handle(msg, dpll);
	if (ret)
		return ret;
	if (nla_put_string(msg, DPLL_A_MODULE_NAME, module_name(dpll->module)))
		return -EMSGSIZE;
	if (nla_put_64bit(msg, DPLL_A_CLOCK_ID, sizeof(dpll->clock_id),
			  &dpll->clock_id, DPLL_A_PAD))
		return -EMSGSIZE;
	ret = dpll_msg_add_temp(msg, dpll, extack);
	if (ret)
		return ret;
	ret = dpll_msg_add_lock_status(msg, dpll, extack);
	if (ret)
		return ret;
	ret = dpll_msg_add_mode(msg, dpll, extack);
	if (ret)
		return ret;
	ret = dpll_msg_add_mode_supported(msg, dpll, extack);
	if (ret)
		return ret;
	if (nla_put_u32(msg, DPLL_A_TYPE, dpll->type))
		return -EMSGSIZE;
	return 0;
}

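/*
 * Notifications: build a full device snapshot with dpll_device_get_one() and
 * multicast it (group 0 of the dpll generic netlink family), so that
 * create/delete/change events carry the same attributes as a
 * DPLL_CMD_DEVICE_GET reply.
 */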
static int
dpll_device_event_send(enum dpll_cmd event, struct dpll_device *dpll)
{
	struct sk_buff *msg;
	int ret = -ENOMEM;
	void *hdr;
	if (WARN_ON(!xa_get_mark(&dpll_device_xa, dpll->id, DPLL_REGISTERED)))
		return -ENODEV;
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	hdr = genlmsg_put(msg, 0, 0, &dpll_nl_family, 0, event);
	if (!hdr)
		goto err_free_msg;
	ret = dpll_device_get_one(dpll, msg, NULL);
	if (ret)
		goto err_cancel_msg;
	genlmsg_end(msg, hdr);
	genlmsg_multicast(&dpll_nl_family, msg, 0, 0, GFP_KERNEL);
	return 0;
err_cancel_msg:
	genlmsg_cancel(msg, hdr);
err_free_msg:
	nlmsg_free(msg);
	return ret;
}

int dpll_device_create_ntf(struct dpll_device *dpll)
{
	return dpll_device_event_send(DPLL_CMD_DEVICE_CREATE_NTF, dpll);
}

int dpll_device_delete_ntf(struct dpll_device *dpll)
{
	return dpll_device_event_send(DPLL_CMD_DEVICE_DELETE_NTF, dpll);
}

static int
__dpll_device_change_ntf(struct dpll_device *dpll)
{
	return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
}

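/*
 * A pin is only reported while it is registered and still reachable: at
 * least one of its parent pins or one of the dplls it is registered with
 * must also carry the DPLL_REGISTERED mark.
 */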
static bool dpll_pin_available(struct dpll_pin *pin)
{
	struct dpll_pin_ref *par_ref;
	unsigned long i;
	if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
		return false;
	xa_for_each(&pin->parent_refs, i, par_ref)
		if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
				DPLL_REGISTERED))
			return true;
	xa_for_each(&pin->dpll_refs, i, par_ref)
		if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
				DPLL_REGISTERED))
			return true;
	return false;
}

/**
 * dpll_device_change_ntf - notify that the dpll device has been changed
 * @dpll: registered dpll pointer
 *
 * Context: acquires and holds dpll_lock.
 * Return: 0 on success, error code otherwise.
 */
int dpll_device_change_ntf(struct dpll_device *dpll)
{
	int ret;
	mutex_lock(&dpll_lock);
	ret = __dpll_device_change_ntf(dpll);
	mutex_unlock(&dpll_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dpll_device_change_ntf);

static int
dpll_pin_event_send(enum dpll_cmd event, struct dpll_pin *pin)
{
	struct sk_buff *msg;
	int ret = -ENOMEM;
	void *hdr;
	if (!dpll_pin_available(pin))
		return -ENODEV;
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	hdr = genlmsg_put(msg, 0, 0, &dpll_nl_family, 0, event);
	if (!hdr)
		goto err_free_msg;
	ret = dpll_cmd_pin_get_one(msg, pin, NULL);
	if (ret)
		goto err_cancel_msg;
	genlmsg_end(msg, hdr);
	genlmsg_multicast(&dpll_nl_family, msg, 0, 0, GFP_KERNEL);
	return 0;
err_cancel_msg:
	genlmsg_cancel(msg, hdr);
err_free_msg:
	nlmsg_free(msg);
	return ret;
}

int dpll_pin_create_ntf(struct dpll_pin *pin)
{
	return dpll_pin_event_send(DPLL_CMD_PIN_CREATE_NTF, pin);
}

int dpll_pin_delete_ntf(struct dpll_pin *pin)
{
	return dpll_pin_event_send(DPLL_CMD_PIN_DELETE_NTF, pin);
}

static int __dpll_pin_change_ntf(struct dpll_pin *pin)
{
	return dpll_pin_event_send(DPLL_CMD_PIN_CHANGE_NTF, pin);
}

/**
 * dpll_pin_change_ntf - notify that the pin has been changed
 * @pin: registered pin pointer
 *
 * Context: acquires and holds dpll_lock.
 * Return: 0 on success, error code otherwise.
 */
int dpll_pin_change_ntf(struct dpll_pin *pin)
{
	int ret;
	mutex_lock(&dpll_lock);
	ret = __dpll_pin_change_ntf(pin);
	mutex_unlock(&dpll_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dpll_pin_change_ntf);

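/*
 * Setters that touch every dpll a pin is registered with (frequency,
 * embedded sync, phase adjust) first verify driver support and the requested
 * value, then apply it per reference; if a driver callback fails part way,
 * the already-updated references are rolled back to the old value and the
 * first error is returned.  A successful change sends a pin change
 * notification.
 */
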
static int
dpll_pin_freq_set(struct dpll_pin *pin, struct nlattr *a,
		  struct netlink_ext_ack *extack)
{
	u64 freq = nla_get_u64(a), old_freq;
	struct dpll_pin_ref *ref, *failed;
	const struct dpll_pin_ops *ops;
	struct dpll_device *dpll;
	unsigned long i;
	int ret;
	if (!dpll_pin_is_freq_supported(pin, freq)) {
		NL_SET_ERR_MSG_ATTR(extack, a, "frequency is not supported by the device");
		return -EINVAL;
	}
	xa_for_each(&pin->dpll_refs, i, ref) {
		ops = dpll_pin_ops(ref);
		if (!ops->frequency_set || !ops->frequency_get) {
			NL_SET_ERR_MSG(extack, "frequency set not supported by the device");
			return -EOPNOTSUPP;
		}
	}
	ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);
	ops = dpll_pin_ops(ref);
	dpll = ref->dpll;
	ret = ops->frequency_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
				 dpll_priv(dpll), &old_freq, extack);
	if (ret) {
		NL_SET_ERR_MSG(extack, "unable to get old frequency value");
		return ret;
	}
	if (freq == old_freq)
		return 0;
	xa_for_each(&pin->dpll_refs, i, ref) {
		ops = dpll_pin_ops(ref);
		dpll = ref->dpll;
		ret = ops->frequency_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
					 dpll, dpll_priv(dpll), freq, extack);
		if (ret) {
			failed = ref;
			NL_SET_ERR_MSG_FMT(extack, "frequency set failed for dpll_id:%u",
					   dpll->id);
			goto rollback;
		}
	}
	__dpll_pin_change_ntf(pin);
	return 0;
rollback:
	xa_for_each(&pin->dpll_refs, i, ref) {
		if (ref == failed)
			break;
		ops = dpll_pin_ops(ref);
		dpll = ref->dpll;
		if (ops->frequency_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
				       dpll, dpll_priv(dpll), old_freq, extack))
			NL_SET_ERR_MSG(extack, "set frequency rollback failed");
	}
	return ret;
}

static int
dpll_pin_esync_set(struct dpll_pin *pin, struct nlattr *a,
		   struct netlink_ext_ack *extack)
{
	struct dpll_pin_ref *ref, *failed;
	const struct dpll_pin_ops *ops;
	struct dpll_pin_esync esync;
	u64 freq = nla_get_u64(a);
	struct dpll_device *dpll;
	bool supported = false;
	unsigned long i;
	int ret;
	xa_for_each(&pin->dpll_refs, i, ref) {
		ops = dpll_pin_ops(ref);
		if (!ops->esync_set || !ops->esync_get) {
			NL_SET_ERR_MSG(extack,
				       "embedded sync feature is not supported by this device");
			return -EOPNOTSUPP;
		}
	}
	ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);
	ops = dpll_pin_ops(ref);
	dpll = ref->dpll;
	ret = ops->esync_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
			     dpll_priv(dpll), &esync, extack);
	if (ret) {
		NL_SET_ERR_MSG(extack, "unable to get current embedded sync frequency value");
		return ret;
	}
	if (freq == esync.freq)
		return 0;
	for (i = 0; i < esync.range_num; i++)
		if (freq <= esync.range[i].max && freq >= esync.range[i].min)
			supported = true;
	if (!supported) {
		NL_SET_ERR_MSG_ATTR(extack, a,
				    "requested embedded sync frequency value is not supported by this device");
		return -EINVAL;
	}
	xa_for_each(&pin->dpll_refs, i, ref) {
		void *pin_dpll_priv;
		ops = dpll_pin_ops(ref);
		dpll = ref->dpll;
		pin_dpll_priv = dpll_pin_on_dpll_priv(dpll, pin);
		ret = ops->esync_set(pin, pin_dpll_priv, dpll, dpll_priv(dpll),
				     freq, extack);
		if (ret) {
			failed = ref;
			NL_SET_ERR_MSG_FMT(extack,
					   "embedded sync frequency set failed for dpll_id: %u",
					   dpll->id);
			goto rollback;
		}
	}
	__dpll_pin_change_ntf(pin);
	return 0;
rollback:
	xa_for_each(&pin->dpll_refs, i, ref) {
		void *pin_dpll_priv;
		if (ref == failed)
			break;
		ops = dpll_pin_ops(ref);
		dpll = ref->dpll;
		pin_dpll_priv = dpll_pin_on_dpll_priv(dpll, pin);
		if (ops->esync_set(pin, pin_dpll_priv, dpll, dpll_priv(dpll),
				   esync.freq, extack))
			NL_SET_ERR_MSG(extack, "set embedded sync frequency rollback failed");
	}
	return ret;
}

static int
dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
			  enum dpll_pin_state state,
			  struct netlink_ext_ack *extack)
{
	struct dpll_pin_ref *parent_ref;
	const struct dpll_pin_ops *ops;
	struct dpll_pin_ref *dpll_ref;
	void *pin_priv, *parent_priv;
	struct dpll_pin *parent;
	unsigned long i;
	int ret;
	if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
	      pin->prop.capabilities)) {
		NL_SET_ERR_MSG(extack, "state changing is not allowed");
		return -EOPNOTSUPP;
	}
	parent = xa_load(&dpll_pin_xa, parent_idx);
	if (!parent)
		return -EINVAL;
	parent_ref = xa_load(&pin->parent_refs, parent->pin_idx);
	if (!parent_ref)
		return -EINVAL;
	xa_for_each(&parent->dpll_refs, i, dpll_ref) {
		ops = dpll_pin_ops(parent_ref);
		if (!ops->state_on_pin_set)
			return -EOPNOTSUPP;
		pin_priv = dpll_pin_on_pin_priv(parent, pin);
		parent_priv = dpll_pin_on_dpll_priv(dpll_ref->dpll, parent);
		ret = ops->state_on_pin_set(pin, pin_priv, parent, parent_priv,
					    state, extack);
		if (ret)
			return ret;
	}
	__dpll_pin_change_ntf(pin);
	return 0;
}

static int
dpll_pin_state_set(struct dpll_device *dpll, struct dpll_pin *pin,
		   enum dpll_pin_state state,
		   struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops;
	struct dpll_pin_ref *ref;
	int ret;
	if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
	      pin->prop.capabilities)) {
		NL_SET_ERR_MSG(extack, "state changing is not allowed");
		return -EOPNOTSUPP;
	}
	ref = xa_load(&pin->dpll_refs, dpll->id);
	ASSERT_NOT_NULL(ref);
	ops = dpll_pin_ops(ref);
	if (!ops->state_on_dpll_set)
		return -EOPNOTSUPP;
	ret = ops->state_on_dpll_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
				     dpll, dpll_priv(dpll), state, extack);
	if (ret)
		return ret;
	__dpll_pin_change_ntf(pin);
	return 0;
}

static int
dpll_pin_prio_set(struct dpll_device *dpll, struct dpll_pin *pin,
		  u32 prio, struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops;
	struct dpll_pin_ref *ref;
	int ret;
	if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE &
	      pin->prop.capabilities)) {
		NL_SET_ERR_MSG(extack, "prio changing is not allowed");
		return -EOPNOTSUPP;
	}
	ref = xa_load(&pin->dpll_refs, dpll->id);
	ASSERT_NOT_NULL(ref);
	ops = dpll_pin_ops(ref);
	if (!ops->prio_set)
		return -EOPNOTSUPP;
	ret = ops->prio_set(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
			    dpll_priv(dpll), prio, extack);
	if (ret)
		return ret;
	__dpll_pin_change_ntf(pin);
	return 0;
}

static int
dpll_pin_direction_set(struct dpll_pin *pin, struct dpll_device *dpll,
		       enum dpll_pin_direction direction,
		       struct netlink_ext_ack *extack)
{
	const struct dpll_pin_ops *ops;
	struct dpll_pin_ref *ref;
	int ret;
	if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE &
	      pin->prop.capabilities)) {
		NL_SET_ERR_MSG(extack, "direction changing is not allowed");
		return -EOPNOTSUPP;
	}
	ref = xa_load(&pin->dpll_refs, dpll->id);
	ASSERT_NOT_NULL(ref);
	ops = dpll_pin_ops(ref);
	if (!ops->direction_set)
		return -EOPNOTSUPP;
	ret = ops->direction_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
				 dpll, dpll_priv(dpll), direction, extack);
	if (ret)
		return ret;
	__dpll_pin_change_ntf(pin);
	return 0;
}

static int
dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
		       struct netlink_ext_ack *extack)
{
	struct dpll_pin_ref *ref, *failed;
	const struct dpll_pin_ops *ops;
	s32 phase_adj, old_phase_adj;
	struct dpll_device *dpll;
	unsigned long i;
	int ret;
	phase_adj = nla_get_s32(phase_adj_attr);
	if (phase_adj > pin->prop.phase_range.max ||
	    phase_adj < pin->prop.phase_range.min) {
		NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
				    "phase adjust value not supported");
		return -EINVAL;
	}
	xa_for_each(&pin->dpll_refs, i, ref) {
		ops = dpll_pin_ops(ref);
		if (!ops->phase_adjust_set || !ops->phase_adjust_get) {
			NL_SET_ERR_MSG(extack, "phase adjust not supported");
			return -EOPNOTSUPP;
		}
	}
	ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);
	ops = dpll_pin_ops(ref);
	dpll = ref->dpll;
	ret = ops->phase_adjust_get(pin, dpll_pin_on_dpll_priv(dpll, pin),
				    dpll, dpll_priv(dpll), &old_phase_adj,
				    extack);
	if (ret) {
		NL_SET_ERR_MSG(extack, "unable to get old phase adjust value");
		return ret;
	}
	if (phase_adj == old_phase_adj)
		return 0;
	xa_for_each(&pin->dpll_refs, i, ref) {
		ops = dpll_pin_ops(ref);
		dpll = ref->dpll;
		ret = ops->phase_adjust_set(pin,
					    dpll_pin_on_dpll_priv(dpll, pin),
					    dpll, dpll_priv(dpll), phase_adj,
					    extack);
		if (ret) {
			failed = ref;
			NL_SET_ERR_MSG_FMT(extack,
					   "phase adjust set failed for dpll_id:%u",
					   dpll->id);
			goto rollback;
		}
	}
	__dpll_pin_change_ntf(pin);
	return 0;
rollback:
	xa_for_each(&pin->dpll_refs, i, ref) {
		if (ref == failed)
			break;
		ops = dpll_pin_ops(ref);
		dpll = ref->dpll;
		if (ops->phase_adjust_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
					  dpll, dpll_priv(dpll), old_phase_adj,
					  extack))
			NL_SET_ERR_MSG(extack, "set phase adjust rollback failed");
	}
	return ret;
}

static int
dpll_pin_parent_device_set(struct dpll_pin *pin, struct nlattr *parent_nest,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[DPLL_A_PIN_MAX + 1];
	enum dpll_pin_direction direction;
	enum dpll_pin_state state;
	struct dpll_pin_ref *ref;
	struct dpll_device *dpll;
	u32 pdpll_idx, prio;
	int ret;
	nla_parse_nested(tb, DPLL_A_PIN_MAX, parent_nest,
			 dpll_pin_parent_device_nl_policy, extack);
	if (!tb[DPLL_A_PIN_PARENT_ID]) {
		NL_SET_ERR_MSG(extack, "device parent id expected");
		return -EINVAL;
	}
	pdpll_idx = nla_get_u32(tb[DPLL_A_PIN_PARENT_ID]);
	dpll = xa_load(&dpll_device_xa, pdpll_idx);
	if (!dpll) {
		NL_SET_ERR_MSG(extack, "parent device not found");
		return -EINVAL;
	}
	ref = xa_load(&pin->dpll_refs, dpll->id);
	if (!ref) {
		NL_SET_ERR_MSG(extack, "pin not connected to given parent device");
		return -EINVAL;
	}
	if (tb[DPLL_A_PIN_STATE]) {
		state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
		ret = dpll_pin_state_set(dpll, pin, state, extack);
		if (ret)
			return ret;
	}
	if (tb[DPLL_A_PIN_PRIO]) {
		prio = nla_get_u32(tb[DPLL_A_PIN_PRIO]);
		ret = dpll_pin_prio_set(dpll, pin, prio, extack);
		if (ret)
			return ret;
	}
	if (tb[DPLL_A_PIN_DIRECTION]) {
		direction = nla_get_u32(tb[DPLL_A_PIN_DIRECTION]);
		ret = dpll_pin_direction_set(pin, dpll, direction, extack);
		if (ret)
			return ret;
	}
	return 0;
}

static int
dpll_pin_parent_pin_set(struct dpll_pin *pin, struct nlattr *parent_nest,
			struct netlink_ext_ack *extack)
{
	struct nlattr *tb[DPLL_A_PIN_MAX + 1];
	u32 ppin_idx;
	int ret;
	nla_parse_nested(tb, DPLL_A_PIN_MAX, parent_nest,
			 dpll_pin_parent_pin_nl_policy, extack);
	if (!tb[DPLL_A_PIN_PARENT_ID]) {
		NL_SET_ERR_MSG(extack, "device parent id expected");
		return -EINVAL;
	}
	ppin_idx = nla_get_u32(tb[DPLL_A_PIN_PARENT_ID]);
	if (tb[DPLL_A_PIN_STATE]) {
		enum dpll_pin_state state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
		ret = dpll_pin_on_pin_state_set(pin, ppin_idx, state, extack);
		if (ret)
			return ret;
	}
	return 0;
}

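/*
 * DPLL_CMD_PIN_SET: walk the request attributes and apply each recognized
 * one (frequency, phase adjust, parent device/pin nest, embedded sync
 * frequency) in the order it appears, stopping at the first failure.
 */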
static int
dpll_pin_set_from_nlattr(struct dpll_pin *pin, struct genl_info *info)
{
	struct nlattr *a;
	int rem, ret;
	nla_for_each_attr(a, genlmsg_data(info->genlhdr),
			  genlmsg_len(info->genlhdr), rem) {
		switch (nla_type(a)) {
		case DPLL_A_PIN_FREQUENCY:
			ret = dpll_pin_freq_set(pin, a, info->extack);
			if (ret)
				return ret;
			break;
		case DPLL_A_PIN_PHASE_ADJUST:
			ret = dpll_pin_phase_adj_set(pin, a, info->extack);
			if (ret)
				return ret;
			break;
		case DPLL_A_PIN_PARENT_DEVICE:
			ret = dpll_pin_parent_device_set(pin, a, info->extack);
			if (ret)
				return ret;
			break;
		case DPLL_A_PIN_PARENT_PIN:
			ret = dpll_pin_parent_pin_set(pin, a, info->extack);
			if (ret)
				return ret;
			break;
		case DPLL_A_PIN_ESYNC_FREQUENCY:
			ret = dpll_pin_esync_set(pin, a, info->extack);
			if (ret)
				return ret;
			break;
		}
	}
	return 0;
}

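/*
 * Match exactly one registered pin against the supplied attributes; an
 * attribute that was not supplied matches any pin.  More than one match is
 * rejected, as DPLL_CMD_PIN_ID_GET must resolve to a single handle.
 */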
static struct dpll_pin *
dpll_pin_find(u64 clock_id, struct nlattr *mod_name_attr,
	      enum dpll_pin_type type, struct nlattr *board_label,
	      struct nlattr *panel_label, struct nlattr *package_label,
	      struct netlink_ext_ack *extack)
{
	bool board_match, panel_match, package_match;
	struct dpll_pin *pin_match = NULL, *pin;
	const struct dpll_pin_properties *prop;
	bool cid_match, mod_match, type_match;
	unsigned long i;
	xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) {
		prop = &pin->prop;
		cid_match = clock_id ? pin->clock_id == clock_id : true;
		mod_match = mod_name_attr && module_name(pin->module) ?
			!nla_strcmp(mod_name_attr,
				    module_name(pin->module)) : true;
		type_match = type ? prop->type == type : true;
		board_match = board_label ? (prop->board_label ?
			!nla_strcmp(board_label, prop->board_label) : false) :
			true;
		panel_match = panel_label ? (prop->panel_label ?
			!nla_strcmp(panel_label, prop->panel_label) : false) :
			true;
		package_match = package_label ? (prop->package_label ?
			!nla_strcmp(package_label, prop->package_label) :
			false) : true;
		if (cid_match && mod_match && type_match && board_match &&
		    panel_match && package_match) {
			if (pin_match) {
				NL_SET_ERR_MSG(extack, "multiple matches");
				return ERR_PTR(-EINVAL);
			}
			pin_match = pin;
		}
	}
	if (!pin_match) {
		NL_SET_ERR_MSG(extack, "not found");
		return ERR_PTR(-ENODEV);
	}
	return pin_match;
}

static struct dpll_pin *dpll_pin_find_from_nlattr(struct genl_info *info)
{
	struct nlattr *attr, *mod_name_attr = NULL, *board_label_attr = NULL,
		*panel_label_attr = NULL, *package_label_attr = NULL;
	enum dpll_pin_type type = 0;
	u64 clock_id = 0;
	int rem = 0;
	nla_for_each_attr(attr, genlmsg_data(info->genlhdr),
			  genlmsg_len(info->genlhdr), rem) {
		switch (nla_type(attr)) {
		case DPLL_A_PIN_CLOCK_ID:
			if (clock_id)
				goto duplicated_attr;
			clock_id = nla_get_u64(attr);
			break;
		case DPLL_A_PIN_MODULE_NAME:
			if (mod_name_attr)
				goto duplicated_attr;
			mod_name_attr = attr;
			break;
		case DPLL_A_PIN_TYPE:
			if (type)
				goto duplicated_attr;
			type = nla_get_u32(attr);
			break;
		case DPLL_A_PIN_BOARD_LABEL:
			if (board_label_attr)
				goto duplicated_attr;
			board_label_attr = attr;
			break;
		case DPLL_A_PIN_PANEL_LABEL:
			if (panel_label_attr)
				goto duplicated_attr;
			panel_label_attr = attr;
			break;
		case DPLL_A_PIN_PACKAGE_LABEL:
			if (package_label_attr)
				goto duplicated_attr;
			package_label_attr = attr;
			break;
		default:
			break;
		}
	}
	if (!(clock_id || mod_name_attr || board_label_attr ||
	      panel_label_attr || package_label_attr)) {
		NL_SET_ERR_MSG(info->extack, "missing attributes");
		return ERR_PTR(-EINVAL);
	}
	return dpll_pin_find(clock_id, mod_name_attr, type, board_label_attr,
			     panel_label_attr, package_label_attr,
			     info->extack);
duplicated_attr:
	NL_SET_ERR_MSG(info->extack, "duplicated attribute");
	return ERR_PTR(-EINVAL);
}

int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct dpll_pin *pin;
	struct sk_buff *msg;
	struct nlattr *hdr;
	int ret;
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
				DPLL_CMD_PIN_ID_GET);
	if (!hdr) {
		nlmsg_free(msg);
		return -EMSGSIZE;
	}
	pin = dpll_pin_find_from_nlattr(info);
	if (!IS_ERR(pin)) {
		if (!dpll_pin_available(pin)) {
			nlmsg_free(msg);
			return -ENODEV;
		}
		ret = dpll_msg_add_pin_handle(msg, pin);
		if (ret) {
			nlmsg_free(msg);
			return ret;
		}
	}
	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);
}

int dpll_nl_pin_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct dpll_pin *pin = info->user_ptr[0];
	struct sk_buff *msg;
	struct nlattr *hdr;
	int ret;
	if (!pin)
		return -ENODEV;
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
				DPLL_CMD_PIN_GET);
	if (!hdr) {
		nlmsg_free(msg);
		return -EMSGSIZE;
	}
	ret = dpll_cmd_pin_get_one(msg, pin, info->extack);
	if (ret) {
		nlmsg_free(msg);
		return ret;
	}
	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);
}

int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct dpll_dump_ctx *ctx = dpll_dump_context(cb);
	struct dpll_pin *pin;
	struct nlattr *hdr;
	unsigned long i;
	int ret = 0;
	mutex_lock(&dpll_lock);
	xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
				 ctx->idx) {
		if (!dpll_pin_available(pin))
			continue;
		hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq,
				  &dpll_nl_family, NLM_F_MULTI,
				  DPLL_CMD_PIN_GET);
		if (!hdr) {
			ret = -EMSGSIZE;
			break;
		}
		ret = dpll_cmd_pin_get_one(skb, pin, cb->extack);
		if (ret) {
			genlmsg_cancel(skb, hdr);
			break;
		}
		genlmsg_end(skb, hdr);
	}
	mutex_unlock(&dpll_lock);
	if (ret == -EMSGSIZE) {
		ctx->idx = i;
		return skb->len;
	}
	return ret;
}

int dpll_nl_pin_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct dpll_pin *pin = info->user_ptr[0];
	return dpll_pin_set_from_nlattr(pin, info);
}

static struct dpll_device *
dpll_device_find(u64 clock_id, struct nlattr *mod_name_attr,
		 enum dpll_type type, struct netlink_ext_ack *extack)
{
	struct dpll_device *dpll_match = NULL, *dpll;
	bool cid_match, mod_match, type_match;
	unsigned long i;
	xa_for_each_marked(&dpll_device_xa, i, dpll, DPLL_REGISTERED) {
		cid_match = clock_id ? dpll->clock_id == clock_id : true;
		mod_match = mod_name_attr ? (module_name(dpll->module) ?
			!nla_strcmp(mod_name_attr,
				    module_name(dpll->module)) : false) : true;
		type_match = type ? dpll->type == type : true;
		if (cid_match && mod_match && type_match) {
			if (dpll_match) {
				NL_SET_ERR_MSG(extack, "multiple matches");
				return ERR_PTR(-EINVAL);
			}
			dpll_match = dpll;
		}
	}
	if (!dpll_match) {
		NL_SET_ERR_MSG(extack, "not found");
		return ERR_PTR(-ENODEV);
	}
	return dpll_match;
}

static struct dpll_device *
dpll_device_find_from_nlattr(struct genl_info *info)
{
	struct nlattr *attr, *mod_name_attr = NULL;
	enum dpll_type type = 0;
	u64 clock_id = 0;
	int rem = 0;
	nla_for_each_attr(attr, genlmsg_data(info->genlhdr),
			  genlmsg_len(info->genlhdr), rem) {
		switch (nla_type(attr)) {
		case DPLL_A_CLOCK_ID:
			if (clock_id)
				goto duplicated_attr;
			clock_id = nla_get_u64(attr);
			break;
		case DPLL_A_MODULE_NAME:
			if (mod_name_attr)
				goto duplicated_attr;
			mod_name_attr = attr;
			break;
		case DPLL_A_TYPE:
			if (type)
				goto duplicated_attr;
			type = nla_get_u32(attr);
			break;
		default:
			break;
		}
	}
	if (!clock_id && !mod_name_attr && !type) {
		NL_SET_ERR_MSG(info->extack, "missing attributes");
		return ERR_PTR(-EINVAL);
	}
	return dpll_device_find(clock_id, mod_name_attr, type, info->extack);
duplicated_attr:
	NL_SET_ERR_MSG(info->extack, "duplicated attribute");
	return ERR_PTR(-EINVAL);
}

int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct dpll_device *dpll;
	struct sk_buff *msg;
	struct nlattr *hdr;
	int ret;
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
				DPLL_CMD_DEVICE_ID_GET);
	if (!hdr) {
		nlmsg_free(msg);
		return -EMSGSIZE;
	}
	dpll = dpll_device_find_from_nlattr(info);
	if (!IS_ERR(dpll)) {
		ret = dpll_msg_add_dev_handle(msg, dpll);
		if (ret) {
			nlmsg_free(msg);
			return ret;
		}
	}
	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);
}

int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct dpll_device *dpll = info->user_ptr[0];
	struct sk_buff *msg;
	struct nlattr *hdr;
	int ret;
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
				DPLL_CMD_DEVICE_GET);
	if (!hdr) {
		nlmsg_free(msg);
		return -EMSGSIZE;
	}
	ret = dpll_device_get_one(dpll, msg, info->extack);
	if (ret) {
		nlmsg_free(msg);
		return ret;
	}
	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);
}

int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	/* placeholder for set command */
	return 0;
}

int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct dpll_dump_ctx *ctx = dpll_dump_context(cb);
	struct dpll_device *dpll;
	struct nlattr *hdr;
	unsigned long i;
	int ret = 0;
	mutex_lock(&dpll_lock);
	xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED,
				 ctx->idx) {
		hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, &dpll_nl_family,
				  NLM_F_MULTI, DPLL_CMD_DEVICE_GET);
		if (!hdr) {
			ret = -EMSGSIZE;
			break;
		}
		ret = dpll_device_get_one(dpll, skb, cb->extack);
		if (ret) {
			genlmsg_cancel(skb, hdr);
			break;
		}
		genlmsg_end(skb, hdr);
	}
	mutex_unlock(&dpll_lock);
	if (ret == -EMSGSIZE) {
		ctx->idx = i;
		return skb->len;
	}
	return ret;
}

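/*
 * Generic netlink pre/post hooks: dpll_pre_doit()/dpll_pin_pre_doit() resolve
 * the target device or pin from DPLL_A_ID/DPLL_A_PIN_ID and return with
 * dpll_lock held, while dpll_post_doit()/dpll_pin_post_doit() drop it after
 * the command handler has run; dpll_lock_doit()/dpll_unlock_doit() only take
 * and release the lock for handlers that do not resolve an object themselves.
 */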
int dpll_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
		  struct genl_info *info)
{
	u32 id;
	if (GENL_REQ_ATTR_CHECK(info, DPLL_A_ID))
		return -EINVAL;
	mutex_lock(&dpll_lock);
	id = nla_get_u32(info->attrs[DPLL_A_ID]);
	info->user_ptr[0] = dpll_device_get_by_id(id);
	if (!info->user_ptr[0]) {
		NL_SET_ERR_MSG(info->extack, "device not found");
		goto unlock;
	}
	return 0;
unlock:
	mutex_unlock(&dpll_lock);
	return -ENODEV;
}

void dpll_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
		    struct genl_info *info)
{
	mutex_unlock(&dpll_lock);
}

int
dpll_lock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
	       struct genl_info *info)
{
	mutex_lock(&dpll_lock);
	return 0;
}

void
dpll_unlock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
		 struct genl_info *info)
{
	mutex_unlock(&dpll_lock);
}

int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
		      struct genl_info *info)
{
	int ret;
	mutex_lock(&dpll_lock);
	if (GENL_REQ_ATTR_CHECK(info, DPLL_A_PIN_ID)) {
		ret = -EINVAL;
		goto unlock_dev;
	}
	info->user_ptr[0] = xa_load(&dpll_pin_xa,
				    nla_get_u32(info->attrs[DPLL_A_PIN_ID]));
	if (!info->user_ptr[0] ||
	    !dpll_pin_available(info->user_ptr[0])) {
		NL_SET_ERR_MSG(info->extack, "pin not found");
		ret = -ENODEV;
		goto unlock_dev;
	}
	return 0;
unlock_dev:
	mutex_unlock(&dpll_lock);
	return ret;
}

void dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
			struct genl_info *info)
{
	mutex_unlock(&dpll_lock);
}