clk_test.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * KUnit tests for the clk framework
  4. */
  5. #include <linux/clk.h>
  6. #include <linux/clk-provider.h>
  7. #include <linux/of.h>
  8. #include <linux/platform_device.h>
  9. /* Needed for clk_hw_get_clk() */
  10. #include "clk.h"
  11. #include <kunit/clk.h>
  12. #include <kunit/of.h>
  13. #include <kunit/platform_device.h>
  14. #include <kunit/test.h>
  15. #include "clk_parent_data_test.h"
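/*
 * An ops structure with no callbacks. Presumably used further down in
 * the file (outside this excerpt) by tests that only need a clock that
 * can be registered, without exercising any operation.
 */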
  16. static const struct clk_ops empty_clk_ops = { };
  17. #define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000)
  18. #define DUMMY_CLOCK_RATE_1 (142 * 1000 * 1000)
  19. #define DUMMY_CLOCK_RATE_2 (242 * 1000 * 1000)
  20. struct clk_dummy_context {
  21. struct clk_hw hw;
  22. unsigned long rate;
  23. };
  24. static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
  25. unsigned long parent_rate)
  26. {
  27. struct clk_dummy_context *ctx =
  28. container_of(hw, struct clk_dummy_context, hw);
  29. return ctx->rate;
  30. }
  31. static int clk_dummy_determine_rate(struct clk_hw *hw,
  32. struct clk_rate_request *req)
  33. {
  34. /* Just return the same rate without modifying it */
  35. return 0;
  36. }
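/*
 * The two determine_rate implementations below let the tests observe
 * how the core applies rate ranges: the "maximize" variant always runs
 * the clock at the maximum allowed rate, the "minimize" variant at the
 * minimum.
 *
 * A hypothetical consumer-side sketch (not part of the tests) of what
 * that means for a clock using one of these ops:
 *
 *	clk_set_rate_range(clk, 100 * 1000 * 1000, 200 * 1000 * 1000);
 *	rate = clk_get_rate(clk);
 *	// should end up at 200 MHz with the maximize ops,
 *	// and at 100 MHz with the minimize ops
 */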
  37. static int clk_dummy_maximize_rate(struct clk_hw *hw,
  38. struct clk_rate_request *req)
  39. {
  40. /*
  41. * If there's a maximum set, always run the clock at the maximum
  42. * allowed.
  43. */
  44. if (req->max_rate < ULONG_MAX)
  45. req->rate = req->max_rate;
  46. return 0;
  47. }
  48. static int clk_dummy_minimize_rate(struct clk_hw *hw,
  49. struct clk_rate_request *req)
  50. {
  51. /*
  52. * If there's a minimum set, always run the clock at the minimum
  53. * allowed.
  54. */
  55. if (req->min_rate > 0)
  56. req->rate = req->min_rate;
  57. return 0;
  58. }
  59. static int clk_dummy_set_rate(struct clk_hw *hw,
  60. unsigned long rate,
  61. unsigned long parent_rate)
  62. {
  63. struct clk_dummy_context *ctx =
  64. container_of(hw, struct clk_dummy_context, hw);
  65. ctx->rate = rate;
  66. return 0;
  67. }
  68. static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
  69. {
  70. if (index >= clk_hw_get_num_parents(hw))
  71. return -EINVAL;
  72. return 0;
  73. }
  74. static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
  75. {
  76. return 0;
  77. }
  78. static const struct clk_ops clk_dummy_rate_ops = {
  79. .recalc_rate = clk_dummy_recalc_rate,
  80. .determine_rate = clk_dummy_determine_rate,
  81. .set_rate = clk_dummy_set_rate,
  82. };
  83. static const struct clk_ops clk_dummy_maximize_rate_ops = {
  84. .recalc_rate = clk_dummy_recalc_rate,
  85. .determine_rate = clk_dummy_maximize_rate,
  86. .set_rate = clk_dummy_set_rate,
  87. };
  88. static const struct clk_ops clk_dummy_minimize_rate_ops = {
  89. .recalc_rate = clk_dummy_recalc_rate,
  90. .determine_rate = clk_dummy_minimize_rate,
  91. .set_rate = clk_dummy_set_rate,
  92. };
  93. static const struct clk_ops clk_dummy_single_parent_ops = {
  94. /*
  95. * FIXME: Even though we should probably be able to use
  96. * __clk_mux_determine_rate() here, if we use it and call
  97. * clk_round_rate() or clk_set_rate() with a rate lower than
  98. * what all the parents can provide, it will return -EINVAL.
  99. *
  100. * This is because it has the undocumented behaviour of always
  101. * picking the closest rate higher than the requested rate. If it
  102. * ends up with something lower, it thus considers the request
  103. * unacceptable and returns an error.
  104. *
  105. * This is somewhat inconsistent and creates a weird threshold:
  106. * rates above the parent rate get rounded to what the parent can
  107. * provide, while rates below the parent rate will
  108. * simply return an error.
  109. */
  110. .determine_rate = __clk_mux_determine_rate_closest,
  111. .set_parent = clk_dummy_single_set_parent,
  112. .get_parent = clk_dummy_single_get_parent,
  113. };
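/*
 * Note: the _closest variant used above picks whichever parent rate
 * minimizes the absolute difference from the request, so a request
 * below every parent rate still rounds to the nearest parent rate
 * instead of failing, which is what these tests rely on.
 */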
  114. struct clk_multiple_parent_ctx {
  115. struct clk_dummy_context parents_ctx[2];
  116. struct clk_hw hw;
  117. u8 current_parent;
  118. };
  119. static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
  120. {
  121. struct clk_multiple_parent_ctx *ctx =
  122. container_of(hw, struct clk_multiple_parent_ctx, hw);
  123. if (index >= clk_hw_get_num_parents(hw))
  124. return -EINVAL;
  125. ctx->current_parent = index;
  126. return 0;
  127. }
  128. static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
  129. {
  130. struct clk_multiple_parent_ctx *ctx =
  131. container_of(hw, struct clk_multiple_parent_ctx, hw);
  132. return ctx->current_parent;
  133. }
  134. static const struct clk_ops clk_multiple_parents_mux_ops = {
  135. .get_parent = clk_multiple_parents_mux_get_parent,
  136. .set_parent = clk_multiple_parents_mux_set_parent,
  137. .determine_rate = __clk_mux_determine_rate_closest,
  138. };
  139. static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
  140. .determine_rate = clk_hw_determine_rate_no_reparent,
  141. .get_parent = clk_multiple_parents_mux_get_parent,
  142. .set_parent = clk_multiple_parents_mux_set_parent,
  143. };
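/*
 * Common init helper: allocates a dummy clock context, registers a
 * single rate clock named "test_dummy_rate" with the given ops and an
 * initial rate of DUMMY_CLOCK_INIT_RATE, and stashes the context in
 * test->priv so the test cases and clk_test_exit() can reach it.
 */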
  144. static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
  145. {
  146. struct clk_dummy_context *ctx;
  147. struct clk_init_data init = { };
  148. int ret;
  149. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  150. if (!ctx)
  151. return -ENOMEM;
  152. ctx->rate = DUMMY_CLOCK_INIT_RATE;
  153. test->priv = ctx;
  154. init.name = "test_dummy_rate";
  155. init.ops = ops;
  156. ctx->hw.init = &init;
  157. ret = clk_hw_register(NULL, &ctx->hw);
  158. if (ret)
  159. return ret;
  160. return 0;
  161. }
  162. static int clk_test_init(struct kunit *test)
  163. {
  164. return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
  165. }
  166. static int clk_maximize_test_init(struct kunit *test)
  167. {
  168. return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
  169. }
  170. static int clk_minimize_test_init(struct kunit *test)
  171. {
  172. return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
  173. }
  174. static void clk_test_exit(struct kunit *test)
  175. {
  176. struct clk_dummy_context *ctx = test->priv;
  177. clk_hw_unregister(&ctx->hw);
  178. }
  179. /*
  180. * Test that the actual rate matches what is returned by clk_get_rate()
  181. */
  182. static void clk_test_get_rate(struct kunit *test)
  183. {
  184. struct clk_dummy_context *ctx = test->priv;
  185. struct clk_hw *hw = &ctx->hw;
  186. struct clk *clk = clk_hw_get_clk(hw, NULL);
  187. unsigned long rate;
  188. rate = clk_get_rate(clk);
  189. KUNIT_ASSERT_GT(test, rate, 0);
  190. KUNIT_EXPECT_EQ(test, rate, ctx->rate);
  191. clk_put(clk);
  192. }
  193. /*
  194. * Test that, after a call to clk_set_rate(), the rate returned by
  195. * clk_get_rate() matches.
  196. *
  197. * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
  198. * modify the requested rate, which is the case for clk_dummy_rate_ops.
  199. */
  200. static void clk_test_set_get_rate(struct kunit *test)
  201. {
  202. struct clk_dummy_context *ctx = test->priv;
  203. struct clk_hw *hw = &ctx->hw;
  204. struct clk *clk = clk_hw_get_clk(hw, NULL);
  205. unsigned long rate;
  206. KUNIT_ASSERT_EQ(test,
  207. clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
  208. 0);
  209. rate = clk_get_rate(clk);
  210. KUNIT_ASSERT_GT(test, rate, 0);
  211. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  212. clk_put(clk);
  213. }
  214. /*
  215. * Test that, after several calls to clk_set_rate(), the rate returned
  216. * by clk_get_rate() matches the last one.
  217. *
  218. * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
  219. * modify the requested rate, which is the case for clk_dummy_rate_ops.
  220. */
  221. static void clk_test_set_set_get_rate(struct kunit *test)
  222. {
  223. struct clk_dummy_context *ctx = test->priv;
  224. struct clk_hw *hw = &ctx->hw;
  225. struct clk *clk = clk_hw_get_clk(hw, NULL);
  226. unsigned long rate;
  227. KUNIT_ASSERT_EQ(test,
  228. clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
  229. 0);
  230. KUNIT_ASSERT_EQ(test,
  231. clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
  232. 0);
  233. rate = clk_get_rate(clk);
  234. KUNIT_ASSERT_GT(test, rate, 0);
  235. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
  236. clk_put(clk);
  237. }
  238. /*
  239. * Test that clk_round_rate() and clk_set_rate() are consistent and will
  240. * return the same frequency.
  241. */
  242. static void clk_test_round_set_get_rate(struct kunit *test)
  243. {
  244. struct clk_dummy_context *ctx = test->priv;
  245. struct clk_hw *hw = &ctx->hw;
  246. struct clk *clk = clk_hw_get_clk(hw, NULL);
  247. unsigned long set_rate;
  248. long rounded_rate;
  249. rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
  250. KUNIT_ASSERT_GT(test, rounded_rate, 0);
  251. KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
  252. KUNIT_ASSERT_EQ(test,
  253. clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
  254. 0);
  255. set_rate = clk_get_rate(clk);
  256. KUNIT_ASSERT_GT(test, set_rate, 0);
  257. KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
  258. clk_put(clk);
  259. }
  260. static struct kunit_case clk_test_cases[] = {
  261. KUNIT_CASE(clk_test_get_rate),
  262. KUNIT_CASE(clk_test_set_get_rate),
  263. KUNIT_CASE(clk_test_set_set_get_rate),
  264. KUNIT_CASE(clk_test_round_set_get_rate),
  265. {}
  266. };
  267. /*
  268. * Test suite for a basic rate clock, without any parent.
  269. *
  270. * These tests exercise the rate API with simple scenarios
  271. */
  272. static struct kunit_suite clk_test_suite = {
  273. .name = "clk-test",
  274. .init = clk_test_init,
  275. .exit = clk_test_exit,
  276. .test_cases = clk_test_cases,
  277. };
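/*
 * For reference, each kunit_suite defined in this file is presumably
 * collected into a single registration call at the end of the file
 * (outside this excerpt), along the lines of:
 *
 *	kunit_test_suites(
 *		&clk_test_suite,
 *		&clk_uncached_test_suite,
 *		...
 *	);
 */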
  278. static int clk_uncached_test_init(struct kunit *test)
  279. {
  280. struct clk_dummy_context *ctx;
  281. int ret;
  282. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  283. if (!ctx)
  284. return -ENOMEM;
  285. test->priv = ctx;
  286. ctx->rate = DUMMY_CLOCK_INIT_RATE;
  287. ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
  288. &clk_dummy_rate_ops,
  289. CLK_GET_RATE_NOCACHE);
  290. ret = clk_hw_register(NULL, &ctx->hw);
  291. if (ret)
  292. return ret;
  293. return 0;
  294. }
  295. /*
  296. * Test that for an uncached clock, the clock framework doesn't cache
  297. * the rate and clk_get_rate() will return the underlying clock rate
  298. * even if it changed.
  299. */
  300. static void clk_test_uncached_get_rate(struct kunit *test)
  301. {
  302. struct clk_dummy_context *ctx = test->priv;
  303. struct clk_hw *hw = &ctx->hw;
  304. struct clk *clk = clk_hw_get_clk(hw, NULL);
  305. unsigned long rate;
  306. rate = clk_get_rate(clk);
  307. KUNIT_ASSERT_GT(test, rate, 0);
  308. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
  309. /* We change the rate behind the clock framework's back */
  310. ctx->rate = DUMMY_CLOCK_RATE_1;
  311. rate = clk_get_rate(clk);
  312. KUNIT_ASSERT_GT(test, rate, 0);
  313. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  314. clk_put(clk);
  315. }
  316. /*
  317. * Test that for an uncached clock, clk_set_rate_range() will work
  318. * properly if the rate hasn't changed.
  319. */
  320. static void clk_test_uncached_set_range(struct kunit *test)
  321. {
  322. struct clk_dummy_context *ctx = test->priv;
  323. struct clk_hw *hw = &ctx->hw;
  324. struct clk *clk = clk_hw_get_clk(hw, NULL);
  325. unsigned long rate;
  326. KUNIT_ASSERT_EQ(test,
  327. clk_set_rate_range(clk,
  328. DUMMY_CLOCK_RATE_1,
  329. DUMMY_CLOCK_RATE_2),
  330. 0);
  331. rate = clk_get_rate(clk);
  332. KUNIT_ASSERT_GT(test, rate, 0);
  333. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
  334. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
  335. clk_put(clk);
  336. }
  337. /*
  338. * Test that for an uncached clock, clk_set_rate_range() will work
  339. * properly if the rate has changed in hardware.
  340. *
  341. * In this case, it means that if the rate wasn't initially in the range
  342. * we're trying to set, but got changed at some point into the range
  343. * without the kernel knowing about it, its rate shouldn't be affected.
  344. */
  345. static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
  346. {
  347. struct clk_dummy_context *ctx = test->priv;
  348. struct clk_hw *hw = &ctx->hw;
  349. struct clk *clk = clk_hw_get_clk(hw, NULL);
  350. unsigned long rate;
  351. /* We change the rate behind the clock framework's back */
  352. ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
  353. KUNIT_ASSERT_EQ(test,
  354. clk_set_rate_range(clk,
  355. DUMMY_CLOCK_RATE_1,
  356. DUMMY_CLOCK_RATE_2),
  357. 0);
  358. rate = clk_get_rate(clk);
  359. KUNIT_ASSERT_GT(test, rate, 0);
  360. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
  361. clk_put(clk);
  362. }
  363. static struct kunit_case clk_uncached_test_cases[] = {
  364. KUNIT_CASE(clk_test_uncached_get_rate),
  365. KUNIT_CASE(clk_test_uncached_set_range),
  366. KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
  367. {}
  368. };
  369. /*
  370. * Test suite for a basic, uncached, rate clock, without any parent.
  371. *
  372. * These tests exercise the rate API with simple scenarios
  373. */
  374. static struct kunit_suite clk_uncached_test_suite = {
  375. .name = "clk-uncached-test",
  376. .init = clk_uncached_test_init,
  377. .exit = clk_test_exit,
  378. .test_cases = clk_uncached_test_cases,
  379. };
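/*
 * Builds the topology used by the multiple-parent mux tests: two dummy
 * rate clocks, "parent-0" at DUMMY_CLOCK_RATE_1 and "parent-1" at
 * DUMMY_CLOCK_RATE_2, feeding a "test-mux" with CLK_SET_RATE_PARENT
 * that initially selects parent-0.
 */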
  380. static int
  381. clk_multiple_parents_mux_test_init(struct kunit *test)
  382. {
  383. struct clk_multiple_parent_ctx *ctx;
  384. const char *parents[2] = { "parent-0", "parent-1"};
  385. int ret;
  386. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  387. if (!ctx)
  388. return -ENOMEM;
  389. test->priv = ctx;
  390. ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
  391. &clk_dummy_rate_ops,
  392. 0);
  393. ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
  394. ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[0].hw);
  395. if (ret)
  396. return ret;
  397. ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
  398. &clk_dummy_rate_ops,
  399. 0);
  400. ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
  401. ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
  402. if (ret)
  403. return ret;
  404. ctx->current_parent = 0;
  405. ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
  406. &clk_multiple_parents_mux_ops,
  407. CLK_SET_RATE_PARENT);
  408. ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
  409. if (ret)
  410. return ret;
  411. return 0;
  412. }
  413. /*
  414. * Test that for a clock with multiple parents, clk_get_parent()
  415. * actually returns the current one.
  416. */
  417. static void
  418. clk_test_multiple_parents_mux_get_parent(struct kunit *test)
  419. {
  420. struct clk_multiple_parent_ctx *ctx = test->priv;
  421. struct clk_hw *hw = &ctx->hw;
  422. struct clk *clk = clk_hw_get_clk(hw, NULL);
  423. struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
  424. KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
  425. clk_put(parent);
  426. clk_put(clk);
  427. }
  428. /*
  429. * Test that for a clock with multiple parents, clk_has_parent()
  430. * actually reports all of them as parents.
  431. */
  432. static void
  433. clk_test_multiple_parents_mux_has_parent(struct kunit *test)
  434. {
  435. struct clk_multiple_parent_ctx *ctx = test->priv;
  436. struct clk_hw *hw = &ctx->hw;
  437. struct clk *clk = clk_hw_get_clk(hw, NULL);
  438. struct clk *parent;
  439. parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
  440. KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
  441. clk_put(parent);
  442. parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
  443. KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
  444. clk_put(parent);
  445. clk_put(clk);
  446. }
  447. /*
  448. * Test that for a clock with multiple parents, if we set a range on
  449. * that clock and the parent is changed, its rate after the reparenting
  450. * is still within the range we asked for.
  451. *
  452. * FIXME: clk_set_parent() only does the reparenting but doesn't
  453. * reevaluate whether the new clock rate is within its boundaries or
  454. * not.
  455. */
  456. static void
  457. clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
  458. {
  459. struct clk_multiple_parent_ctx *ctx = test->priv;
  460. struct clk_hw *hw = &ctx->hw;
  461. struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
  462. struct clk *parent1, *parent2;
  463. unsigned long rate;
  464. int ret;
  465. kunit_skip(test, "This needs to be fixed in the core.");
  466. parent1 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[0].hw, NULL);
  467. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
  468. KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
  469. parent2 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
  470. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
  471. ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
  472. KUNIT_ASSERT_EQ(test, ret, 0);
  473. ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
  474. KUNIT_ASSERT_EQ(test, ret, 0);
  475. ret = clk_set_rate_range(clk,
  476. DUMMY_CLOCK_RATE_1 - 1000,
  477. DUMMY_CLOCK_RATE_1 + 1000);
  478. KUNIT_ASSERT_EQ(test, ret, 0);
  479. ret = clk_set_parent(clk, parent2);
  480. KUNIT_ASSERT_EQ(test, ret, 0);
  481. rate = clk_get_rate(clk);
  482. KUNIT_ASSERT_GT(test, rate, 0);
  483. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
  484. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
  485. }
  486. static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
  487. KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
  488. KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
  489. KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
  490. {}
  491. };
  492. /*
  493. * Test suite for a basic mux clock with two parents, with
  494. * CLK_SET_RATE_PARENT on the child.
  495. *
  496. * These tests exercise the consumer API and check that the states of the
  497. * child and parents are sane and consistent.
  498. */
  499. static struct kunit_suite
  500. clk_multiple_parents_mux_test_suite = {
  501. .name = "clk-multiple-parents-mux-test",
  502. .init = clk_multiple_parents_mux_test_init,
  503. .test_cases = clk_multiple_parents_mux_test_cases,
  504. };
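/*
 * Builds the orphan mux topology: parent index 0 ("missing-parent") is
 * never registered, only "proper-parent" (index 1, at
 * DUMMY_CLOCK_INIT_RATE) is. Since the mux's default parent is index 0,
 * "test-orphan-mux" starts out as an orphan.
 */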
  505. static int
  506. clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
  507. {
  508. struct clk_multiple_parent_ctx *ctx;
  509. const char *parents[2] = { "missing-parent", "proper-parent"};
  510. int ret;
  511. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  512. if (!ctx)
  513. return -ENOMEM;
  514. test->priv = ctx;
  515. ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
  516. &clk_dummy_rate_ops,
  517. 0);
  518. ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
  519. ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
  520. if (ret)
  521. return ret;
  522. ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
  523. &clk_multiple_parents_mux_ops,
  524. CLK_SET_RATE_PARENT);
  525. ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
  526. if (ret)
  527. return ret;
  528. return 0;
  529. }
  530. /*
  531. * Test that, for a mux whose current parent hasn't been registered yet and is
  532. * thus orphan, clk_get_parent() will return NULL.
  533. */
  534. static void
  535. clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
  536. {
  537. struct clk_multiple_parent_ctx *ctx = test->priv;
  538. struct clk_hw *hw = &ctx->hw;
  539. struct clk *clk = clk_hw_get_clk(hw, NULL);
  540. KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
  541. clk_put(clk);
  542. }
  543. /*
  544. * Test that, for a mux whose current parent hasn't been registered yet,
  545. * calling clk_set_parent() to a valid parent will properly update the
  546. * mux parent and its orphan status.
  547. */
  548. static void
  549. clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
  550. {
  551. struct clk_multiple_parent_ctx *ctx = test->priv;
  552. struct clk_hw *hw = &ctx->hw;
  553. struct clk *clk = clk_hw_get_clk(hw, NULL);
  554. struct clk *parent, *new_parent;
  555. int ret;
  556. parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
  557. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
  558. ret = clk_set_parent(clk, parent);
  559. KUNIT_ASSERT_EQ(test, ret, 0);
  560. new_parent = clk_get_parent(clk);
  561. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_parent);
  562. KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
  563. clk_put(parent);
  564. clk_put(clk);
  565. }
  566. /*
  567. * Test that, for a mux that started orphan but got switched to a valid
  568. * parent, calling clk_drop_range() on the mux won't affect the parent
  569. * rate.
  570. */
  571. static void
  572. clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
  573. {
  574. struct clk_multiple_parent_ctx *ctx = test->priv;
  575. struct clk_hw *hw = &ctx->hw;
  576. struct clk *clk = clk_hw_get_clk(hw, NULL);
  577. struct clk *parent;
  578. unsigned long parent_rate, new_parent_rate;
  579. int ret;
  580. parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
  581. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
  582. parent_rate = clk_get_rate(parent);
  583. KUNIT_ASSERT_GT(test, parent_rate, 0);
  584. ret = clk_set_parent(clk, parent);
  585. KUNIT_ASSERT_EQ(test, ret, 0);
  586. ret = clk_drop_range(clk);
  587. KUNIT_ASSERT_EQ(test, ret, 0);
  588. new_parent_rate = clk_get_rate(clk);
  589. KUNIT_ASSERT_GT(test, new_parent_rate, 0);
  590. KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
  591. clk_put(parent);
  592. clk_put(clk);
  593. }
  594. /*
  595. * Test that, for a mux that started orphan but got switched to a valid
  596. * parent, the rate of the mux and its new parent are consistent.
  597. */
  598. static void
  599. clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
  600. {
  601. struct clk_multiple_parent_ctx *ctx = test->priv;
  602. struct clk_hw *hw = &ctx->hw;
  603. struct clk *clk = clk_hw_get_clk(hw, NULL);
  604. struct clk *parent;
  605. unsigned long parent_rate, rate;
  606. int ret;
  607. parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
  608. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
  609. parent_rate = clk_get_rate(parent);
  610. KUNIT_ASSERT_GT(test, parent_rate, 0);
  611. ret = clk_set_parent(clk, parent);
  612. KUNIT_ASSERT_EQ(test, ret, 0);
  613. rate = clk_get_rate(clk);
  614. KUNIT_ASSERT_GT(test, rate, 0);
  615. KUNIT_EXPECT_EQ(test, parent_rate, rate);
  616. clk_put(parent);
  617. clk_put(clk);
  618. }
  619. /*
  620. * Test that, for a mux that started orphan but got switched to a valid
  621. * parent, calling clk_put() on the mux won't affect the parent rate.
  622. */
  623. static void
  624. clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
  625. {
  626. struct clk_multiple_parent_ctx *ctx = test->priv;
  627. struct clk *clk, *parent;
  628. unsigned long parent_rate, new_parent_rate;
  629. int ret;
  630. parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
  631. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
  632. clk = clk_hw_get_clk(&ctx->hw, NULL);
  633. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
  634. parent_rate = clk_get_rate(parent);
  635. KUNIT_ASSERT_GT(test, parent_rate, 0);
  636. ret = clk_set_parent(clk, parent);
  637. KUNIT_ASSERT_EQ(test, ret, 0);
  638. clk_put(clk);
  639. new_parent_rate = clk_get_rate(parent);
  640. KUNIT_ASSERT_GT(test, new_parent_rate, 0);
  641. KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
  642. clk_put(parent);
  643. }
  644. /*
  645. * Test that, for a mux that started orphan but got switched to a valid
  646. * parent, calling clk_set_rate_range() will affect the parent state if
  647. * its rate is out of range.
  648. */
  649. static void
  650. clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
  651. {
  652. struct clk_multiple_parent_ctx *ctx = test->priv;
  653. struct clk_hw *hw = &ctx->hw;
  654. struct clk *clk = clk_hw_get_clk(hw, NULL);
  655. struct clk *parent;
  656. unsigned long rate;
  657. int ret;
  658. parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
  659. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
  660. ret = clk_set_parent(clk, parent);
  661. KUNIT_ASSERT_EQ(test, ret, 0);
  662. ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
  663. KUNIT_ASSERT_EQ(test, ret, 0);
  664. rate = clk_get_rate(clk);
  665. KUNIT_ASSERT_GT(test, rate, 0);
  666. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
  667. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
  668. clk_put(parent);
  669. clk_put(clk);
  670. }
  671. /*
  672. * Test that, for a mux that started orphan but got switched to a valid
  673. * parent, calling clk_set_rate_range() won't affect the parent state if
  674. * its rate is within range.
  675. */
  676. static void
  677. clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
  678. {
  679. struct clk_multiple_parent_ctx *ctx = test->priv;
  680. struct clk_hw *hw = &ctx->hw;
  681. struct clk *clk = clk_hw_get_clk(hw, NULL);
  682. struct clk *parent;
  683. unsigned long parent_rate, new_parent_rate;
  684. int ret;
  685. parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
  686. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
  687. parent_rate = clk_get_rate(parent);
  688. KUNIT_ASSERT_GT(test, parent_rate, 0);
  689. ret = clk_set_parent(clk, parent);
  690. KUNIT_ASSERT_EQ(test, ret, 0);
  691. ret = clk_set_rate_range(clk,
  692. DUMMY_CLOCK_INIT_RATE - 1000,
  693. DUMMY_CLOCK_INIT_RATE + 1000);
  694. KUNIT_ASSERT_EQ(test, ret, 0);
  695. new_parent_rate = clk_get_rate(parent);
  696. KUNIT_ASSERT_GT(test, new_parent_rate, 0);
  697. KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
  698. clk_put(parent);
  699. clk_put(clk);
  700. }
  701. /*
  702. * Test that, for a mux whose current parent hasn't been registered yet,
  703. * calling clk_set_rate_range() will succeed, and will be taken into
  704. * account when rounding a rate.
  705. */
  706. static void
  707. clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
  708. {
  709. struct clk_multiple_parent_ctx *ctx = test->priv;
  710. struct clk_hw *hw = &ctx->hw;
  711. struct clk *clk = clk_hw_get_clk(hw, NULL);
  712. long rate;
  713. int ret;
  714. ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
  715. KUNIT_ASSERT_EQ(test, ret, 0);
  716. rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
  717. KUNIT_ASSERT_GT(test, rate, 0);
  718. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
  719. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
  720. clk_put(clk);
  721. }
  722. /*
  723. * Test that, for a mux that started orphan, was assigned a rate and
  724. * then got switched to a valid parent, its rate is eventually within
  725. * range.
  726. *
  727. * FIXME: Even though we update the rate as part of clk_set_parent(), we
  728. * don't evaluate whether that new rate is within range and needs to be
  729. * adjusted.
  730. */
  731. static void
  732. clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
  733. {
  734. struct clk_multiple_parent_ctx *ctx = test->priv;
  735. struct clk_hw *hw = &ctx->hw;
  736. struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
  737. struct clk *parent;
  738. unsigned long rate;
  739. int ret;
  740. kunit_skip(test, "This needs to be fixed in the core.");
  741. clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
  742. parent = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
  743. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
  744. ret = clk_set_parent(clk, parent);
  745. KUNIT_ASSERT_EQ(test, ret, 0);
  746. rate = clk_get_rate(clk);
  747. KUNIT_ASSERT_GT(test, rate, 0);
  748. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
  749. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
  750. }
  751. static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
  752. KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
  753. KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
  754. KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
  755. KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
  756. KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
  757. KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
  758. KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
  759. KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
  760. KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
  761. {}
  762. };
  763. /*
  764. * Test suite for a basic mux clock with two parents. The default parent
  765. * isn't registered, only the second parent is. By default, the clock
  766. * will thus be orphan.
  767. *
  768. * These tests exercise the behaviour of the consumer API when dealing
  769. * with an orphan clock, and how we deal with the transition to a valid
  770. * parent.
  771. */
  772. static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
  773. .name = "clk-orphan-transparent-multiple-parent-mux-test",
  774. .init = clk_orphan_transparent_multiple_parent_mux_test_init,
  775. .test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
  776. };
  777. struct clk_single_parent_ctx {
  778. struct clk_dummy_context parent_ctx;
  779. struct clk_hw hw;
  780. };
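/*
 * Builds the single-parent topology: a dummy rate "parent-clk" at
 * DUMMY_CLOCK_INIT_RATE, and a "test-clk" child with
 * CLK_SET_RATE_PARENT that simply forwards rate requests to it.
 */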
  781. static int clk_single_parent_mux_test_init(struct kunit *test)
  782. {
  783. struct clk_single_parent_ctx *ctx;
  784. int ret;
  785. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  786. if (!ctx)
  787. return -ENOMEM;
  788. test->priv = ctx;
  789. ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
  790. ctx->parent_ctx.hw.init =
  791. CLK_HW_INIT_NO_PARENT("parent-clk",
  792. &clk_dummy_rate_ops,
  793. 0);
  794. ret = clk_hw_register_kunit(test, NULL, &ctx->parent_ctx.hw);
  795. if (ret)
  796. return ret;
  797. ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
  798. &clk_dummy_single_parent_ops,
  799. CLK_SET_RATE_PARENT);
  800. ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
  801. if (ret)
  802. return ret;
  803. return 0;
  804. }
  805. static void
  806. clk_single_parent_mux_test_exit(struct kunit *test)
  807. {
  808. struct clk_single_parent_ctx *ctx = test->priv;
  809. clk_hw_unregister(&ctx->hw);
  810. clk_hw_unregister(&ctx->parent_ctx.hw);
  811. }
  812. /*
  813. * Test that for a clock with a single parent, clk_get_parent() actually
  814. * returns the parent.
  815. */
  816. static void
  817. clk_test_single_parent_mux_get_parent(struct kunit *test)
  818. {
  819. struct clk_single_parent_ctx *ctx = test->priv;
  820. struct clk_hw *hw = &ctx->hw;
  821. struct clk *clk = clk_hw_get_clk(hw, NULL);
  822. struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
  823. KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
  824. clk_put(parent);
  825. clk_put(clk);
  826. }
  827. /*
  828. * Test that for a clock with a single parent, clk_has_parent() actually
  829. * reports it as a parent.
  830. */
  831. static void
  832. clk_test_single_parent_mux_has_parent(struct kunit *test)
  833. {
  834. struct clk_single_parent_ctx *ctx = test->priv;
  835. struct clk_hw *hw = &ctx->hw;
  836. struct clk *clk = clk_hw_get_clk(hw, NULL);
  837. struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
  838. KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
  839. clk_put(parent);
  840. clk_put(clk);
  841. }
  842. /*
  843. * Test that for a clock that can't modify its rate and with a single
  844. * parent, if we set disjoint ranges on the parent and then the child,
  845. * the second will return an error.
  846. *
  847. * FIXME: clk_set_rate_range() only considers the current clock when
  848. * evaluating whether ranges are disjoint, and not the upstream clocks'
  849. * ranges.
  850. */
  851. static void
  852. clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
  853. {
  854. struct clk_single_parent_ctx *ctx = test->priv;
  855. struct clk_hw *hw = &ctx->hw;
  856. struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
  857. struct clk *parent;
  858. int ret;
  859. kunit_skip(test, "This needs to be fixed in the core.");
  860. parent = clk_get_parent(clk);
  861. KUNIT_ASSERT_PTR_NE(test, parent, NULL);
  862. ret = clk_set_rate_range(parent, 1000, 2000);
  863. KUNIT_ASSERT_EQ(test, ret, 0);
  864. ret = clk_set_rate_range(clk, 3000, 4000);
  865. KUNIT_EXPECT_LT(test, ret, 0);
  866. }
  867. /*
  868. * Test that for a clock that can't modify its rate and with a single
  869. * parent, if we set disjoint ranges on the child and then the parent,
  870. * the second will return an error.
  871. *
  872. * FIXME: clk_set_rate_range() only considers the current clock when
  873. * evaluating whether ranges are disjoint, and not the downstream clocks'
  874. * ranges.
  875. */
  876. static void
  877. clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
  878. {
  879. struct clk_single_parent_ctx *ctx = test->priv;
  880. struct clk_hw *hw = &ctx->hw;
  881. struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
  882. struct clk *parent;
  883. int ret;
  884. kunit_skip(test, "This needs to be fixed in the core.");
  885. parent = clk_get_parent(clk);
  886. KUNIT_ASSERT_PTR_NE(test, parent, NULL);
  887. ret = clk_set_rate_range(clk, 1000, 2000);
  888. KUNIT_ASSERT_EQ(test, ret, 0);
  889. ret = clk_set_rate_range(parent, 3000, 4000);
  890. KUNIT_EXPECT_LT(test, ret, 0);
  891. }
  892. /*
  893. * Test that for a clock that can't modify its rate and with a single
  894. * parent, if we set a range on the parent and then call
  895. * clk_round_rate(), the boundaries of the parent are taken into
  896. * account.
  897. */
  898. static void
  899. clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
  900. {
  901. struct clk_single_parent_ctx *ctx = test->priv;
  902. struct clk_hw *hw = &ctx->hw;
  903. struct clk *clk = clk_hw_get_clk(hw, NULL);
  904. struct clk *parent;
  905. long rate;
  906. int ret;
  907. parent = clk_get_parent(clk);
  908. KUNIT_ASSERT_PTR_NE(test, parent, NULL);
  909. ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
  910. KUNIT_ASSERT_EQ(test, ret, 0);
  911. rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
  912. KUNIT_ASSERT_GT(test, rate, 0);
  913. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
  914. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
  915. clk_put(clk);
  916. }
  917. /*
  918. * Test that for a clock that can't modify its rate and with a single
  919. * parent, if we set a range on the parent and a more restrictive one on
  920. * the child, and then call clk_round_rate(), the boundaries of the
  921. * two clocks are taken into account.
  922. */
  923. static void
  924. clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
  925. {
  926. struct clk_single_parent_ctx *ctx = test->priv;
  927. struct clk_hw *hw = &ctx->hw;
  928. struct clk *clk = clk_hw_get_clk(hw, NULL);
  929. struct clk *parent;
  930. long rate;
  931. int ret;
  932. parent = clk_get_parent(clk);
  933. KUNIT_ASSERT_PTR_NE(test, parent, NULL);
  934. ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
  935. KUNIT_ASSERT_EQ(test, ret, 0);
  936. ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
  937. KUNIT_ASSERT_EQ(test, ret, 0);
  938. rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
  939. KUNIT_ASSERT_GT(test, rate, 0);
  940. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
  941. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
  942. rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
  943. KUNIT_ASSERT_GT(test, rate, 0);
  944. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
  945. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
  946. clk_put(clk);
  947. }
  948. /*
  949. * Test that for a clock that can't modify its rate and with a single
  950. * parent, if we set a range on the child and a more restrictive one on
  951. * the parent, and then call clk_round_rate(), the boundaries of the
  952. * two clocks are taken into account.
  953. */
  954. static void
  955. clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
  956. {
  957. struct clk_single_parent_ctx *ctx = test->priv;
  958. struct clk_hw *hw = &ctx->hw;
  959. struct clk *clk = clk_hw_get_clk(hw, NULL);
  960. struct clk *parent;
  961. long rate;
  962. int ret;
  963. parent = clk_get_parent(clk);
  964. KUNIT_ASSERT_PTR_NE(test, parent, NULL);
  965. ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
  966. KUNIT_ASSERT_EQ(test, ret, 0);
  967. ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
  968. KUNIT_ASSERT_EQ(test, ret, 0);
  969. rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
  970. KUNIT_ASSERT_GT(test, rate, 0);
  971. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
  972. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
  973. rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
  974. KUNIT_ASSERT_GT(test, rate, 0);
  975. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
  976. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
  977. clk_put(clk);
  978. }
  979. static struct kunit_case clk_single_parent_mux_test_cases[] = {
  980. KUNIT_CASE(clk_test_single_parent_mux_get_parent),
  981. KUNIT_CASE(clk_test_single_parent_mux_has_parent),
  982. KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
  983. KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
  984. KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
  985. KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
  986. KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
  987. {}
  988. };
  989. /*
  990. * Test suite for a basic mux clock with one parent, with
  991. * CLK_SET_RATE_PARENT on the child.
  992. *
  993. * These tests exercise the consumer API and check that the states of the
  994. * child and parent are sane and consistent.
  995. */
  996. static struct kunit_suite
  997. clk_single_parent_mux_test_suite = {
  998. .name = "clk-single-parent-mux-test",
  999. .init = clk_single_parent_mux_test_init,
  1000. .test_cases = clk_single_parent_mux_test_cases,
  1001. };
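/*
 * Registers the child ("test_orphan_dummy_parent") before its only
 * parent ("orphan_parent"), so the child starts out as an orphan. The
 * same on-stack init structure is reused for both registrations; this
 * should be fine since the core copies what it needs from clk_init_data
 * during clk_hw_register().
 */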
  1002. static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
  1003. {
  1004. struct clk_single_parent_ctx *ctx;
  1005. struct clk_init_data init = { };
  1006. const char * const parents[] = { "orphan_parent" };
  1007. int ret;
  1008. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  1009. if (!ctx)
  1010. return -ENOMEM;
  1011. test->priv = ctx;
  1012. init.name = "test_orphan_dummy_parent";
  1013. init.ops = &clk_dummy_single_parent_ops;
  1014. init.parent_names = parents;
  1015. init.num_parents = ARRAY_SIZE(parents);
  1016. init.flags = CLK_SET_RATE_PARENT;
  1017. ctx->hw.init = &init;
  1018. ret = clk_hw_register(NULL, &ctx->hw);
  1019. if (ret)
  1020. return ret;
  1021. memset(&init, 0, sizeof(init));
  1022. init.name = "orphan_parent";
  1023. init.ops = &clk_dummy_rate_ops;
  1024. ctx->parent_ctx.hw.init = &init;
  1025. ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
  1026. ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
  1027. if (ret)
  1028. return ret;
  1029. return 0;
  1030. }
  1031. /*
  1032. * Test that a mux-only clock, with an initial rate within a range,
  1033. * will still have the same rate after the range has been enforced.
  1034. *
  1035. * See:
  1036. * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
  1037. */
  1038. static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
  1039. {
  1040. struct clk_single_parent_ctx *ctx = test->priv;
  1041. struct clk_hw *hw = &ctx->hw;
  1042. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1043. unsigned long rate, new_rate;
  1044. rate = clk_get_rate(clk);
  1045. KUNIT_ASSERT_GT(test, rate, 0);
  1046. KUNIT_ASSERT_EQ(test,
  1047. clk_set_rate_range(clk,
  1048. ctx->parent_ctx.rate - 1000,
  1049. ctx->parent_ctx.rate + 1000),
  1050. 0);
  1051. new_rate = clk_get_rate(clk);
  1052. KUNIT_ASSERT_GT(test, new_rate, 0);
  1053. KUNIT_EXPECT_EQ(test, rate, new_rate);
  1054. clk_put(clk);
  1055. }
  1056. static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
  1057. KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
  1058. {}
  1059. };
  1060. /*
  1061. * Test suite for a basic mux clock with one parent. The parent is
  1062. * registered after its child. The clock will thus be an orphan when
  1063. * registered, but will no longer be when the tests run.
  1064. *
  1065. * These tests make sure a clock that used to be orphan has a sane,
  1066. * consistent, behaviour.
  1067. */
  1068. static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
  1069. .name = "clk-orphan-transparent-single-parent-test",
  1070. .init = clk_orphan_transparent_single_parent_mux_test_init,
  1071. .exit = clk_single_parent_mux_test_exit,
  1072. .test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
  1073. };
  1074. struct clk_single_parent_two_lvl_ctx {
  1075. struct clk_dummy_context parent_parent_ctx;
  1076. struct clk_dummy_context parent_ctx;
  1077. struct clk_hw hw;
  1078. };
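/*
 * Builds a two-level chain, registered leaf-first: "intermediate-parent"
 * and "test-clk" are registered before their root, so both start out as
 * orphans; "root-parent" (a dummy rate clock at DUMMY_CLOCK_INIT_RATE)
 * is only registered last.
 */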
  1079. static int
  1080. clk_orphan_two_level_root_last_test_init(struct kunit *test)
  1081. {
  1082. struct clk_single_parent_two_lvl_ctx *ctx;
  1083. int ret;
  1084. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  1085. if (!ctx)
  1086. return -ENOMEM;
  1087. test->priv = ctx;
  1088. ctx->parent_ctx.hw.init =
  1089. CLK_HW_INIT("intermediate-parent",
  1090. "root-parent",
  1091. &clk_dummy_single_parent_ops,
  1092. CLK_SET_RATE_PARENT);
  1093. ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
  1094. if (ret)
  1095. return ret;
  1096. ctx->hw.init =
  1097. CLK_HW_INIT("test-clk", "intermediate-parent",
  1098. &clk_dummy_single_parent_ops,
  1099. CLK_SET_RATE_PARENT);
  1100. ret = clk_hw_register(NULL, &ctx->hw);
  1101. if (ret)
  1102. return ret;
  1103. ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
  1104. ctx->parent_parent_ctx.hw.init =
  1105. CLK_HW_INIT_NO_PARENT("root-parent",
  1106. &clk_dummy_rate_ops,
  1107. 0);
  1108. ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
  1109. if (ret)
  1110. return ret;
  1111. return 0;
  1112. }
  1113. static void
  1114. clk_orphan_two_level_root_last_test_exit(struct kunit *test)
  1115. {
  1116. struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
  1117. clk_hw_unregister(&ctx->hw);
  1118. clk_hw_unregister(&ctx->parent_ctx.hw);
  1119. clk_hw_unregister(&ctx->parent_parent_ctx.hw);
  1120. }
  1121. /*
  1122. * Test that, for a clock whose parent used to be orphan, clk_get_rate()
  1123. * will return the proper rate.
  1124. */
  1125. static void
  1126. clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
  1127. {
  1128. struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
  1129. struct clk_hw *hw = &ctx->hw;
  1130. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1131. unsigned long rate;
  1132. rate = clk_get_rate(clk);
  1133. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
  1134. clk_put(clk);
  1135. }
  1136. /*
  1137. * Test that, for a clock whose parent used to be orphan,
  1138. * clk_set_rate_range() won't affect its rate if it is already within
  1139. * range.
  1140. *
  1141. * See (for Exynos 4210):
  1142. * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
  1143. */
  1144. static void
  1145. clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
  1146. {
  1147. struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
  1148. struct clk_hw *hw = &ctx->hw;
  1149. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1150. unsigned long rate;
  1151. int ret;
  1152. ret = clk_set_rate_range(clk,
  1153. DUMMY_CLOCK_INIT_RATE - 1000,
  1154. DUMMY_CLOCK_INIT_RATE + 1000);
  1155. KUNIT_ASSERT_EQ(test, ret, 0);
  1156. rate = clk_get_rate(clk);
  1157. KUNIT_ASSERT_GT(test, rate, 0);
  1158. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
  1159. clk_put(clk);
  1160. }
  1161. static struct kunit_case
  1162. clk_orphan_two_level_root_last_test_cases[] = {
  1163. KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
  1164. KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
  1165. {}
  1166. };
  1167. /*
1168. * Test suite for a basic, transparent clock whose parent is also such
1169. * a clock. The parent's parent is registered last, while the parent
1170. * and its child are registered before it, in that order. The
1171. * intermediate and leaf clocks will thus be orphans when registered,
1172. * but the leaf clock itself will always have its parent and will never
1173. * be reparented: it is only an orphan because its parent is.
  1174. *
  1175. * These tests exercise the behaviour of the consumer API when dealing
  1176. * with an orphan clock, and how we deal with the transition to a valid
  1177. * parent.
  1178. */
  1179. static struct kunit_suite
  1180. clk_orphan_two_level_root_last_test_suite = {
  1181. .name = "clk-orphan-two-level-root-last-test",
  1182. .init = clk_orphan_two_level_root_last_test_init,
  1183. .exit = clk_orphan_two_level_root_last_test_exit,
  1184. .test_cases = clk_orphan_two_level_root_last_test_cases,
  1185. };
  1186. /*
  1187. * Test that clk_set_rate_range won't return an error for a valid range
  1188. * and that it will make sure the rate of the clock is within the
  1189. * boundaries.
  1190. */
  1191. static void clk_range_test_set_range(struct kunit *test)
  1192. {
  1193. struct clk_dummy_context *ctx = test->priv;
  1194. struct clk_hw *hw = &ctx->hw;
  1195. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1196. unsigned long rate;
  1197. KUNIT_ASSERT_EQ(test,
  1198. clk_set_rate_range(clk,
  1199. DUMMY_CLOCK_RATE_1,
  1200. DUMMY_CLOCK_RATE_2),
  1201. 0);
  1202. rate = clk_get_rate(clk);
  1203. KUNIT_ASSERT_GT(test, rate, 0);
  1204. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
  1205. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
  1206. clk_put(clk);
  1207. }
  1208. /*
  1209. * Test that calling clk_set_rate_range with a minimum rate higher than
  1210. * the maximum rate returns an error.
  1211. */
  1212. static void clk_range_test_set_range_invalid(struct kunit *test)
  1213. {
  1214. struct clk_dummy_context *ctx = test->priv;
  1215. struct clk_hw *hw = &ctx->hw;
  1216. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1217. KUNIT_EXPECT_LT(test,
  1218. clk_set_rate_range(clk,
  1219. DUMMY_CLOCK_RATE_1 + 1000,
  1220. DUMMY_CLOCK_RATE_1),
  1221. 0);
  1222. clk_put(clk);
  1223. }
  1224. /*
1225. * Test that users can't set multiple disjoint ranges that would be
  1226. * impossible to meet.
  1227. */
  1228. static void clk_range_test_multiple_disjoints_range(struct kunit *test)
  1229. {
  1230. struct clk_dummy_context *ctx = test->priv;
  1231. struct clk_hw *hw = &ctx->hw;
  1232. struct clk *user1, *user2;
  1233. user1 = clk_hw_get_clk(hw, NULL);
  1234. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
  1235. user2 = clk_hw_get_clk(hw, NULL);
  1236. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
  1237. KUNIT_ASSERT_EQ(test,
  1238. clk_set_rate_range(user1, 1000, 2000),
  1239. 0);
  1240. KUNIT_EXPECT_LT(test,
  1241. clk_set_rate_range(user2, 3000, 4000),
  1242. 0);
  1243. clk_put(user2);
  1244. clk_put(user1);
  1245. }
  1246. /*
  1247. * Test that if our clock has some boundaries and we try to round a rate
  1248. * lower than the minimum, the returned rate will be within range.
  1249. */
  1250. static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
  1251. {
  1252. struct clk_dummy_context *ctx = test->priv;
  1253. struct clk_hw *hw = &ctx->hw;
  1254. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1255. long rate;
  1256. KUNIT_ASSERT_EQ(test,
  1257. clk_set_rate_range(clk,
  1258. DUMMY_CLOCK_RATE_1,
  1259. DUMMY_CLOCK_RATE_2),
  1260. 0);
  1261. rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
  1262. KUNIT_ASSERT_GT(test, rate, 0);
  1263. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
  1264. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
  1265. clk_put(clk);
  1266. }
  1267. /*
  1268. * Test that if our clock has some boundaries and we try to set a rate
1269. * lower than the minimum, the new rate will be within range.
  1270. */
  1271. static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
  1272. {
  1273. struct clk_dummy_context *ctx = test->priv;
  1274. struct clk_hw *hw = &ctx->hw;
  1275. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1276. unsigned long rate;
  1277. KUNIT_ASSERT_EQ(test,
  1278. clk_set_rate_range(clk,
  1279. DUMMY_CLOCK_RATE_1,
  1280. DUMMY_CLOCK_RATE_2),
  1281. 0);
  1282. KUNIT_ASSERT_EQ(test,
  1283. clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
  1284. 0);
  1285. rate = clk_get_rate(clk);
  1286. KUNIT_ASSERT_GT(test, rate, 0);
  1287. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
  1288. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
  1289. clk_put(clk);
  1290. }
  1291. /*
  1292. * Test that if our clock has some boundaries and we try to round and
  1293. * set a rate lower than the minimum, the rate returned by
  1294. * clk_round_rate() will be consistent with the new rate set by
  1295. * clk_set_rate().
  1296. */
  1297. static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
  1298. {
  1299. struct clk_dummy_context *ctx = test->priv;
  1300. struct clk_hw *hw = &ctx->hw;
  1301. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1302. long rounded;
  1303. KUNIT_ASSERT_EQ(test,
  1304. clk_set_rate_range(clk,
  1305. DUMMY_CLOCK_RATE_1,
  1306. DUMMY_CLOCK_RATE_2),
  1307. 0);
  1308. rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
  1309. KUNIT_ASSERT_GT(test, rounded, 0);
  1310. KUNIT_ASSERT_EQ(test,
  1311. clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
  1312. 0);
  1313. KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
  1314. clk_put(clk);
  1315. }
  1316. /*
  1317. * Test that if our clock has some boundaries and we try to round a rate
  1318. * higher than the maximum, the returned rate will be within range.
  1319. */
  1320. static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
  1321. {
  1322. struct clk_dummy_context *ctx = test->priv;
  1323. struct clk_hw *hw = &ctx->hw;
  1324. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1325. long rate;
  1326. KUNIT_ASSERT_EQ(test,
  1327. clk_set_rate_range(clk,
  1328. DUMMY_CLOCK_RATE_1,
  1329. DUMMY_CLOCK_RATE_2),
  1330. 0);
  1331. rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
  1332. KUNIT_ASSERT_GT(test, rate, 0);
  1333. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
  1334. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
  1335. clk_put(clk);
  1336. }
  1337. /*
  1338. * Test that if our clock has some boundaries and we try to set a rate
  1339. * higher than the maximum, the new rate will be within range.
  1340. */
  1341. static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
  1342. {
  1343. struct clk_dummy_context *ctx = test->priv;
  1344. struct clk_hw *hw = &ctx->hw;
  1345. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1346. unsigned long rate;
  1347. KUNIT_ASSERT_EQ(test,
  1348. clk_set_rate_range(clk,
  1349. DUMMY_CLOCK_RATE_1,
  1350. DUMMY_CLOCK_RATE_2),
  1351. 0);
  1352. KUNIT_ASSERT_EQ(test,
  1353. clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
  1354. 0);
  1355. rate = clk_get_rate(clk);
  1356. KUNIT_ASSERT_GT(test, rate, 0);
  1357. KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
  1358. KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
  1359. clk_put(clk);
  1360. }
  1361. /*
  1362. * Test that if our clock has some boundaries and we try to round and
  1363. * set a rate higher than the maximum, the rate returned by
  1364. * clk_round_rate() will be consistent with the new rate set by
  1365. * clk_set_rate().
  1366. */
  1367. static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
  1368. {
  1369. struct clk_dummy_context *ctx = test->priv;
  1370. struct clk_hw *hw = &ctx->hw;
  1371. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1372. long rounded;
  1373. KUNIT_ASSERT_EQ(test,
  1374. clk_set_rate_range(clk,
  1375. DUMMY_CLOCK_RATE_1,
  1376. DUMMY_CLOCK_RATE_2),
  1377. 0);
  1378. rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
  1379. KUNIT_ASSERT_GT(test, rounded, 0);
  1380. KUNIT_ASSERT_EQ(test,
  1381. clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
  1382. 0);
  1383. KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
  1384. clk_put(clk);
  1385. }
  1386. /*
  1387. * Test that if our clock has a rate lower than the minimum set by a
  1388. * call to clk_set_rate_range(), the rate will be raised to match the
  1389. * new minimum.
  1390. *
  1391. * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1392. * modify the requested rate, which is the case for clk_dummy_rate_ops.
  1393. */
  1394. static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
  1395. {
  1396. struct clk_dummy_context *ctx = test->priv;
  1397. struct clk_hw *hw = &ctx->hw;
  1398. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1399. unsigned long rate;
  1400. KUNIT_ASSERT_EQ(test,
  1401. clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
  1402. 0);
  1403. KUNIT_ASSERT_EQ(test,
  1404. clk_set_rate_range(clk,
  1405. DUMMY_CLOCK_RATE_1,
  1406. DUMMY_CLOCK_RATE_2),
  1407. 0);
  1408. rate = clk_get_rate(clk);
  1409. KUNIT_ASSERT_GT(test, rate, 0);
  1410. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  1411. clk_put(clk);
  1412. }
  1413. /*
  1414. * Test that if our clock has a rate higher than the maximum set by a
  1415. * call to clk_set_rate_range(), the rate will be lowered to match the
  1416. * new maximum.
  1417. *
  1418. * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1419. * modify the requested rate, which is the case for clk_dummy_rate_ops.
  1420. */
  1421. static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
  1422. {
  1423. struct clk_dummy_context *ctx = test->priv;
  1424. struct clk_hw *hw = &ctx->hw;
  1425. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1426. unsigned long rate;
  1427. KUNIT_ASSERT_EQ(test,
  1428. clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
  1429. 0);
  1430. KUNIT_ASSERT_EQ(test,
  1431. clk_set_rate_range(clk,
  1432. DUMMY_CLOCK_RATE_1,
  1433. DUMMY_CLOCK_RATE_2),
  1434. 0);
  1435. rate = clk_get_rate(clk);
  1436. KUNIT_ASSERT_GT(test, rate, 0);
  1437. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
  1438. clk_put(clk);
  1439. }
  1440. static struct kunit_case clk_range_test_cases[] = {
  1441. KUNIT_CASE(clk_range_test_set_range),
  1442. KUNIT_CASE(clk_range_test_set_range_invalid),
  1443. KUNIT_CASE(clk_range_test_multiple_disjoints_range),
  1444. KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
  1445. KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
  1446. KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
  1447. KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
  1448. KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
  1449. KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
  1450. KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
  1451. KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
  1452. {}
  1453. };
  1454. /*
  1455. * Test suite for a basic rate clock, without any parent.
  1456. *
  1457. * These tests exercise the rate range API: clk_set_rate_range(),
  1458. * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
  1459. */
  1460. static struct kunit_suite clk_range_test_suite = {
  1461. .name = "clk-range-test",
  1462. .init = clk_test_init,
  1463. .exit = clk_test_exit,
  1464. .test_cases = clk_range_test_cases,
  1465. };
  1466. /*
  1467. * Test that if we have several subsequent calls to
  1468. * clk_set_rate_range(), the core will reevaluate whether a new rate is
  1469. * needed each and every time.
  1470. *
  1471. * With clk_dummy_maximize_rate_ops, this means that the rate will
  1472. * trail along the maximum as it evolves.
  1473. */
  1474. static void clk_range_test_set_range_rate_maximized(struct kunit *test)
  1475. {
  1476. struct clk_dummy_context *ctx = test->priv;
  1477. struct clk_hw *hw = &ctx->hw;
  1478. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1479. unsigned long rate;
  1480. KUNIT_ASSERT_EQ(test,
  1481. clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
  1482. 0);
  1483. KUNIT_ASSERT_EQ(test,
  1484. clk_set_rate_range(clk,
  1485. DUMMY_CLOCK_RATE_1,
  1486. DUMMY_CLOCK_RATE_2),
  1487. 0);
  1488. rate = clk_get_rate(clk);
  1489. KUNIT_ASSERT_GT(test, rate, 0);
  1490. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
  1491. KUNIT_ASSERT_EQ(test,
  1492. clk_set_rate_range(clk,
  1493. DUMMY_CLOCK_RATE_1,
  1494. DUMMY_CLOCK_RATE_2 - 1000),
  1495. 0);
  1496. rate = clk_get_rate(clk);
  1497. KUNIT_ASSERT_GT(test, rate, 0);
  1498. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
  1499. KUNIT_ASSERT_EQ(test,
  1500. clk_set_rate_range(clk,
  1501. DUMMY_CLOCK_RATE_1,
  1502. DUMMY_CLOCK_RATE_2),
  1503. 0);
  1504. rate = clk_get_rate(clk);
  1505. KUNIT_ASSERT_GT(test, rate, 0);
  1506. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
  1507. clk_put(clk);
  1508. }
  1509. /*
  1510. * Test that if we have several subsequent calls to
  1511. * clk_set_rate_range(), across multiple users, the core will reevaluate
  1512. * whether a new rate is needed each and every time.
  1513. *
  1514. * With clk_dummy_maximize_rate_ops, this means that the rate will
  1515. * trail along the maximum as it evolves.
  1516. */
  1517. static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
  1518. {
  1519. struct clk_dummy_context *ctx = test->priv;
  1520. struct clk_hw *hw = &ctx->hw;
  1521. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1522. struct clk *user1, *user2;
  1523. unsigned long rate;
  1524. user1 = clk_hw_get_clk(hw, NULL);
  1525. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
  1526. user2 = clk_hw_get_clk(hw, NULL);
  1527. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
  1528. KUNIT_ASSERT_EQ(test,
  1529. clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
  1530. 0);
  1531. KUNIT_ASSERT_EQ(test,
  1532. clk_set_rate_range(user1,
  1533. 0,
  1534. DUMMY_CLOCK_RATE_2),
  1535. 0);
  1536. rate = clk_get_rate(clk);
  1537. KUNIT_ASSERT_GT(test, rate, 0);
  1538. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
  1539. KUNIT_ASSERT_EQ(test,
  1540. clk_set_rate_range(user2,
  1541. 0,
  1542. DUMMY_CLOCK_RATE_1),
  1543. 0);
  1544. rate = clk_get_rate(clk);
  1545. KUNIT_ASSERT_GT(test, rate, 0);
  1546. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  1547. KUNIT_ASSERT_EQ(test,
  1548. clk_drop_range(user2),
  1549. 0);
  1550. rate = clk_get_rate(clk);
  1551. KUNIT_ASSERT_GT(test, rate, 0);
  1552. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
  1553. clk_put(user2);
  1554. clk_put(user1);
  1555. clk_put(clk);
  1556. }
  1557. /*
  1558. * Test that if we have several subsequent calls to
  1559. * clk_set_rate_range(), across multiple users, the core will reevaluate
1560. * whether a new rate is needed, including when a user drops its clock.
  1561. *
  1562. * With clk_dummy_maximize_rate_ops, this means that the rate will
  1563. * trail along the maximum as it evolves.
  1564. */
  1565. static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
  1566. {
  1567. struct clk_dummy_context *ctx = test->priv;
  1568. struct clk_hw *hw = &ctx->hw;
  1569. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1570. struct clk *user1, *user2;
  1571. unsigned long rate;
  1572. user1 = clk_hw_get_clk(hw, NULL);
  1573. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
  1574. user2 = clk_hw_get_clk(hw, NULL);
  1575. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
  1576. KUNIT_ASSERT_EQ(test,
  1577. clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
  1578. 0);
  1579. KUNIT_ASSERT_EQ(test,
  1580. clk_set_rate_range(user1,
  1581. 0,
  1582. DUMMY_CLOCK_RATE_2),
  1583. 0);
  1584. rate = clk_get_rate(clk);
  1585. KUNIT_ASSERT_GT(test, rate, 0);
  1586. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
  1587. KUNIT_ASSERT_EQ(test,
  1588. clk_set_rate_range(user2,
  1589. 0,
  1590. DUMMY_CLOCK_RATE_1),
  1591. 0);
  1592. rate = clk_get_rate(clk);
  1593. KUNIT_ASSERT_GT(test, rate, 0);
  1594. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  1595. clk_put(user2);
  1596. rate = clk_get_rate(clk);
  1597. KUNIT_ASSERT_GT(test, rate, 0);
  1598. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
  1599. clk_put(user1);
  1600. clk_put(clk);
  1601. }
  1602. static struct kunit_case clk_range_maximize_test_cases[] = {
  1603. KUNIT_CASE(clk_range_test_set_range_rate_maximized),
  1604. KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
  1605. KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
  1606. {}
  1607. };
  1608. /*
  1609. * Test suite for a basic rate clock, without any parent.
  1610. *
  1611. * These tests exercise the rate range API: clk_set_rate_range(),
  1612. * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
  1613. * driver that will always try to run at the highest possible rate.
  1614. */
  1615. static struct kunit_suite clk_range_maximize_test_suite = {
  1616. .name = "clk-range-maximize-test",
  1617. .init = clk_maximize_test_init,
  1618. .exit = clk_test_exit,
  1619. .test_cases = clk_range_maximize_test_cases,
  1620. };
  1621. /*
  1622. * Test that if we have several subsequent calls to
  1623. * clk_set_rate_range(), the core will reevaluate whether a new rate is
  1624. * needed each and every time.
  1625. *
  1626. * With clk_dummy_minimize_rate_ops, this means that the rate will
  1627. * trail along the minimum as it evolves.
  1628. */
  1629. static void clk_range_test_set_range_rate_minimized(struct kunit *test)
  1630. {
  1631. struct clk_dummy_context *ctx = test->priv;
  1632. struct clk_hw *hw = &ctx->hw;
  1633. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1634. unsigned long rate;
  1635. KUNIT_ASSERT_EQ(test,
  1636. clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
  1637. 0);
  1638. KUNIT_ASSERT_EQ(test,
  1639. clk_set_rate_range(clk,
  1640. DUMMY_CLOCK_RATE_1,
  1641. DUMMY_CLOCK_RATE_2),
  1642. 0);
  1643. rate = clk_get_rate(clk);
  1644. KUNIT_ASSERT_GT(test, rate, 0);
  1645. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  1646. KUNIT_ASSERT_EQ(test,
  1647. clk_set_rate_range(clk,
  1648. DUMMY_CLOCK_RATE_1 + 1000,
  1649. DUMMY_CLOCK_RATE_2),
  1650. 0);
  1651. rate = clk_get_rate(clk);
  1652. KUNIT_ASSERT_GT(test, rate, 0);
  1653. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
  1654. KUNIT_ASSERT_EQ(test,
  1655. clk_set_rate_range(clk,
  1656. DUMMY_CLOCK_RATE_1,
  1657. DUMMY_CLOCK_RATE_2),
  1658. 0);
  1659. rate = clk_get_rate(clk);
  1660. KUNIT_ASSERT_GT(test, rate, 0);
  1661. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  1662. clk_put(clk);
  1663. }
  1664. /*
  1665. * Test that if we have several subsequent calls to
  1666. * clk_set_rate_range(), across multiple users, the core will reevaluate
  1667. * whether a new rate is needed each and every time.
  1668. *
  1669. * With clk_dummy_minimize_rate_ops, this means that the rate will
  1670. * trail along the minimum as it evolves.
  1671. */
  1672. static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
  1673. {
  1674. struct clk_dummy_context *ctx = test->priv;
  1675. struct clk_hw *hw = &ctx->hw;
  1676. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1677. struct clk *user1, *user2;
  1678. unsigned long rate;
  1679. user1 = clk_hw_get_clk(hw, NULL);
  1680. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
  1681. user2 = clk_hw_get_clk(hw, NULL);
  1682. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
  1683. KUNIT_ASSERT_EQ(test,
  1684. clk_set_rate_range(user1,
  1685. DUMMY_CLOCK_RATE_1,
  1686. ULONG_MAX),
  1687. 0);
  1688. rate = clk_get_rate(clk);
  1689. KUNIT_ASSERT_GT(test, rate, 0);
  1690. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  1691. KUNIT_ASSERT_EQ(test,
  1692. clk_set_rate_range(user2,
  1693. DUMMY_CLOCK_RATE_2,
  1694. ULONG_MAX),
  1695. 0);
  1696. rate = clk_get_rate(clk);
  1697. KUNIT_ASSERT_GT(test, rate, 0);
  1698. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
  1699. KUNIT_ASSERT_EQ(test,
  1700. clk_drop_range(user2),
  1701. 0);
  1702. rate = clk_get_rate(clk);
  1703. KUNIT_ASSERT_GT(test, rate, 0);
  1704. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  1705. clk_put(user2);
  1706. clk_put(user1);
  1707. clk_put(clk);
  1708. }
  1709. /*
  1710. * Test that if we have several subsequent calls to
  1711. * clk_set_rate_range(), across multiple users, the core will reevaluate
1712. * whether a new rate is needed, including when a user drops its clock.
  1713. *
  1714. * With clk_dummy_minimize_rate_ops, this means that the rate will
  1715. * trail along the minimum as it evolves.
  1716. */
  1717. static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
  1718. {
  1719. struct clk_dummy_context *ctx = test->priv;
  1720. struct clk_hw *hw = &ctx->hw;
  1721. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1722. struct clk *user1, *user2;
  1723. unsigned long rate;
  1724. user1 = clk_hw_get_clk(hw, NULL);
  1725. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
  1726. user2 = clk_hw_get_clk(hw, NULL);
  1727. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
  1728. KUNIT_ASSERT_EQ(test,
  1729. clk_set_rate_range(user1,
  1730. DUMMY_CLOCK_RATE_1,
  1731. ULONG_MAX),
  1732. 0);
  1733. rate = clk_get_rate(clk);
  1734. KUNIT_ASSERT_GT(test, rate, 0);
  1735. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  1736. KUNIT_ASSERT_EQ(test,
  1737. clk_set_rate_range(user2,
  1738. DUMMY_CLOCK_RATE_2,
  1739. ULONG_MAX),
  1740. 0);
  1741. rate = clk_get_rate(clk);
  1742. KUNIT_ASSERT_GT(test, rate, 0);
  1743. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
  1744. clk_put(user2);
  1745. rate = clk_get_rate(clk);
  1746. KUNIT_ASSERT_GT(test, rate, 0);
  1747. KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  1748. clk_put(user1);
  1749. clk_put(clk);
  1750. }
  1751. static struct kunit_case clk_range_minimize_test_cases[] = {
  1752. KUNIT_CASE(clk_range_test_set_range_rate_minimized),
  1753. KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
  1754. KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
  1755. {}
  1756. };
  1757. /*
  1758. * Test suite for a basic rate clock, without any parent.
  1759. *
  1760. * These tests exercise the rate range API: clk_set_rate_range(),
  1761. * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
  1762. * driver that will always try to run at the lowest possible rate.
  1763. */
  1764. static struct kunit_suite clk_range_minimize_test_suite = {
  1765. .name = "clk-range-minimize-test",
  1766. .init = clk_minimize_test_init,
  1767. .exit = clk_test_exit,
  1768. .test_cases = clk_range_minimize_test_cases,
  1769. };
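/**
 * struct clk_leaf_mux_ctx - Context for the leaf-mux set_rate_parent tests
 * @mux_ctx: mux ("test-mux") and its two rate parents
 * @hw: leaf clock under test ("test-clock")
 * @parent: pass-through clock ("test-parent") between @hw and the mux
 * @req: rate request handed to @determine_rate_func, checked by the test
 * @determine_rate_func: determine_rate helper selected by the test case
 */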
  1770. struct clk_leaf_mux_ctx {
  1771. struct clk_multiple_parent_ctx mux_ctx;
  1772. struct clk_hw hw;
  1773. struct clk_hw parent;
  1774. struct clk_rate_request *req;
  1775. int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
  1776. };
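/*
 * Forward the rate request to our parent using the determine_rate helper
 * selected by the test case, then report the parent's result as our rate.
 */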
  1777. static int clk_leaf_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
  1778. {
  1779. struct clk_leaf_mux_ctx *ctx = container_of(hw, struct clk_leaf_mux_ctx, hw);
  1780. int ret;
  1781. struct clk_rate_request *parent_req = ctx->req;
  1782. clk_hw_forward_rate_request(hw, req, req->best_parent_hw, parent_req, req->rate);
  1783. ret = ctx->determine_rate_func(req->best_parent_hw, parent_req);
  1784. if (ret)
  1785. return ret;
  1786. req->rate = parent_req->rate;
  1787. return 0;
  1788. }
  1789. static const struct clk_ops clk_leaf_mux_set_rate_parent_ops = {
  1790. .determine_rate = clk_leaf_mux_determine_rate,
  1791. .set_parent = clk_dummy_single_set_parent,
  1792. .get_parent = clk_dummy_single_get_parent,
  1793. };
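/*
 * Build the following tree, with CLK_SET_RATE_PARENT set on both the
 * pass-through clock and the leaf:
 *
 * parent-0 (DUMMY_CLOCK_RATE_1) --\
 *                                  +-- test-mux -- test-parent -- test-clock
 * parent-1 (DUMMY_CLOCK_RATE_2) --/
 */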
  1794. static int
  1795. clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
  1796. {
  1797. struct clk_leaf_mux_ctx *ctx;
  1798. const char *top_parents[2] = { "parent-0", "parent-1" };
  1799. int ret;
  1800. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  1801. if (!ctx)
  1802. return -ENOMEM;
  1803. test->priv = ctx;
  1804. ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
  1805. &clk_dummy_rate_ops,
  1806. 0);
  1807. ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
  1808. ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
  1809. if (ret)
  1810. return ret;
  1811. ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
  1812. &clk_dummy_rate_ops,
  1813. 0);
  1814. ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
  1815. ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
  1816. if (ret)
  1817. return ret;
  1818. ctx->mux_ctx.current_parent = 0;
  1819. ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
  1820. &clk_multiple_parents_mux_ops,
  1821. 0);
  1822. ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
  1823. if (ret)
  1824. return ret;
  1825. ctx->parent.init = CLK_HW_INIT_HW("test-parent", &ctx->mux_ctx.hw,
  1826. &empty_clk_ops, CLK_SET_RATE_PARENT);
  1827. ret = clk_hw_register(NULL, &ctx->parent);
  1828. if (ret)
  1829. return ret;
  1830. ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->parent,
  1831. &clk_leaf_mux_set_rate_parent_ops,
  1832. CLK_SET_RATE_PARENT);
  1833. ret = clk_hw_register(NULL, &ctx->hw);
  1834. if (ret)
  1835. return ret;
  1836. return 0;
  1837. }
  1838. static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
  1839. {
  1840. struct clk_leaf_mux_ctx *ctx = test->priv;
  1841. clk_hw_unregister(&ctx->hw);
  1842. clk_hw_unregister(&ctx->parent);
  1843. clk_hw_unregister(&ctx->mux_ctx.hw);
  1844. clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
  1845. clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
  1846. }
  1847. struct clk_leaf_mux_set_rate_parent_determine_rate_test_case {
  1848. const char *desc;
  1849. int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
  1850. };
  1851. static void
  1852. clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc(
  1853. const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *t, char *desc)
  1854. {
  1855. strcpy(desc, t->desc);
  1856. }
  1857. static const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case
  1858. clk_leaf_mux_set_rate_parent_determine_rate_test_cases[] = {
  1859. {
  1860. /*
  1861. * Test that __clk_determine_rate() on the parent that can't
  1862. * change rate doesn't return a clk_rate_request structure with
  1863. * the best_parent_hw pointer pointing to the parent.
  1864. */
  1865. .desc = "clk_leaf_mux_set_rate_parent__clk_determine_rate_proper_parent",
  1866. .determine_rate_func = __clk_determine_rate,
  1867. },
  1868. {
  1869. /*
  1870. * Test that __clk_mux_determine_rate() on the parent that
  1871. * can't change rate doesn't return a clk_rate_request
  1872. * structure with the best_parent_hw pointer pointing to
  1873. * the parent.
  1874. */
  1875. .desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_proper_parent",
  1876. .determine_rate_func = __clk_mux_determine_rate,
  1877. },
  1878. {
  1879. /*
  1880. * Test that __clk_mux_determine_rate_closest() on the parent
  1881. * that can't change rate doesn't return a clk_rate_request
  1882. * structure with the best_parent_hw pointer pointing to
  1883. * the parent.
  1884. */
  1885. .desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_closest_proper_parent",
  1886. .determine_rate_func = __clk_mux_determine_rate_closest,
  1887. },
  1888. {
  1889. /*
  1890. * Test that clk_hw_determine_rate_no_reparent() on the parent
  1891. * that can't change rate doesn't return a clk_rate_request
  1892. * structure with the best_parent_hw pointer pointing to
  1893. * the parent.
  1894. */
  1895. .desc = "clk_leaf_mux_set_rate_parent_clk_hw_determine_rate_no_reparent_proper_parent",
  1896. .determine_rate_func = clk_hw_determine_rate_no_reparent,
  1897. },
  1898. };
  1899. KUNIT_ARRAY_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
  1900. clk_leaf_mux_set_rate_parent_determine_rate_test_cases,
  1901. clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc)
  1902. /*
  1903. * Test that when a clk that can't change rate itself calls a function like
  1904. * __clk_determine_rate() on its parent it doesn't get back a clk_rate_request
  1905. * structure that has the best_parent_hw pointer point to the clk_hw passed
  1906. * into the determine rate function. See commit 262ca38f4b6e ("clk: Stop
  1907. * forwarding clk_rate_requests to the parent") for more background.
  1908. */
  1909. static void clk_leaf_mux_set_rate_parent_determine_rate_test(struct kunit *test)
  1910. {
  1911. struct clk_leaf_mux_ctx *ctx = test->priv;
  1912. struct clk_hw *hw = &ctx->hw;
  1913. struct clk *clk = clk_hw_get_clk(hw, NULL);
  1914. struct clk_rate_request req;
  1915. unsigned long rate;
  1916. const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *test_param;
  1917. test_param = test->param_value;
  1918. ctx->determine_rate_func = test_param->determine_rate_func;
  1919. ctx->req = &req;
  1920. rate = clk_get_rate(clk);
  1921. KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
  1922. KUNIT_ASSERT_EQ(test, DUMMY_CLOCK_RATE_2, clk_round_rate(clk, DUMMY_CLOCK_RATE_2));
  1923. KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
  1924. KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
  1925. KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
  1926. clk_put(clk);
  1927. }
  1928. static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
  1929. KUNIT_CASE_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
  1930. clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params),
  1931. {}
  1932. };
  1933. /*
  1934. * Test suite for a clock whose parent is a pass-through clk whose parent is a
  1935. * mux with multiple parents. The leaf and pass-through clocks have the
  1936. * CLK_SET_RATE_PARENT flag, and will forward rate requests to the mux, which
  1937. * will then select which parent is the best fit for a given rate.
  1938. *
  1939. * These tests exercise the behaviour of muxes, and the proper selection
  1940. * of parents.
  1941. */
  1942. static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
  1943. .name = "clk-leaf-mux-set-rate-parent",
  1944. .init = clk_leaf_mux_set_rate_parent_test_init,
  1945. .exit = clk_leaf_mux_set_rate_parent_test_exit,
  1946. .test_cases = clk_leaf_mux_set_rate_parent_test_cases,
  1947. };
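/**
 * struct clk_mux_notifier_rate_change - Data recorded for one notification
 * @done: true once the notification has been received
 * @old_rate: rate reported before the change
 * @new_rate: rate reported after the change
 * @wq: waitqueue woken up when the notification arrives
 */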
  1948. struct clk_mux_notifier_rate_change {
  1949. bool done;
  1950. unsigned long old_rate;
  1951. unsigned long new_rate;
  1952. wait_queue_head_t wq;
  1953. };
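/**
 * struct clk_mux_notifier_ctx - Context for the mux notifier tests
 * @mux_ctx: mux under test ("test-mux") and its two rate parents
 * @clk: consumer handle on the mux the notifier is registered on
 * @clk_nb: notifier block calling clk_mux_notifier_callback()
 * @pre_rate_change: data recorded for PRE_RATE_CHANGE
 * @post_rate_change: data recorded for POST_RATE_CHANGE
 */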
  1954. struct clk_mux_notifier_ctx {
  1955. struct clk_multiple_parent_ctx mux_ctx;
  1956. struct clk *clk;
  1957. struct notifier_block clk_nb;
  1958. struct clk_mux_notifier_rate_change pre_rate_change;
  1959. struct clk_mux_notifier_rate_change post_rate_change;
  1960. };
  1961. #define NOTIFIER_TIMEOUT_MS 100
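/*
 * Record the old and new rates for PRE_RATE_CHANGE and POST_RATE_CHANGE
 * notifications and wake up the test waiting on the matching waitqueue.
 */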
  1962. static int clk_mux_notifier_callback(struct notifier_block *nb,
  1963. unsigned long action, void *data)
  1964. {
  1965. struct clk_notifier_data *clk_data = data;
  1966. struct clk_mux_notifier_ctx *ctx = container_of(nb,
  1967. struct clk_mux_notifier_ctx,
  1968. clk_nb);
  1969. if (action & PRE_RATE_CHANGE) {
  1970. ctx->pre_rate_change.old_rate = clk_data->old_rate;
  1971. ctx->pre_rate_change.new_rate = clk_data->new_rate;
  1972. ctx->pre_rate_change.done = true;
  1973. wake_up_interruptible(&ctx->pre_rate_change.wq);
  1974. }
  1975. if (action & POST_RATE_CHANGE) {
  1976. ctx->post_rate_change.old_rate = clk_data->old_rate;
  1977. ctx->post_rate_change.new_rate = clk_data->new_rate;
  1978. ctx->post_rate_change.done = true;
  1979. wake_up_interruptible(&ctx->post_rate_change.wq);
  1980. }
  1981. return 0;
  1982. }
  1983. static int clk_mux_notifier_test_init(struct kunit *test)
  1984. {
  1985. struct clk_mux_notifier_ctx *ctx;
  1986. const char *top_parents[2] = { "parent-0", "parent-1" };
  1987. int ret;
  1988. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  1989. if (!ctx)
  1990. return -ENOMEM;
  1991. test->priv = ctx;
  1992. ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
  1993. init_waitqueue_head(&ctx->pre_rate_change.wq);
  1994. init_waitqueue_head(&ctx->post_rate_change.wq);
  1995. ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
  1996. &clk_dummy_rate_ops,
  1997. 0);
  1998. ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
  1999. ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
  2000. if (ret)
  2001. return ret;
  2002. ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
  2003. &clk_dummy_rate_ops,
  2004. 0);
  2005. ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
  2006. ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
  2007. if (ret)
  2008. return ret;
  2009. ctx->mux_ctx.current_parent = 0;
  2010. ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
  2011. &clk_multiple_parents_mux_ops,
  2012. 0);
  2013. ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
  2014. if (ret)
  2015. return ret;
  2016. ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
  2017. ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
  2018. if (ret)
  2019. return ret;
  2020. return 0;
  2021. }
  2022. static void clk_mux_notifier_test_exit(struct kunit *test)
  2023. {
  2024. struct clk_mux_notifier_ctx *ctx = test->priv;
  2025. struct clk *clk = ctx->clk;
  2026. clk_notifier_unregister(clk, &ctx->clk_nb);
  2027. clk_put(clk);
  2028. clk_hw_unregister(&ctx->mux_ctx.hw);
  2029. clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
  2030. clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
  2031. }
  2032. /*
2033. * Test that if we have a notifier registered on a mux, the core will
2034. * notify us when we switch to another parent, with the proper
  2035. * old and new rates.
  2036. */
  2037. static void clk_mux_notifier_set_parent_test(struct kunit *test)
  2038. {
  2039. struct clk_mux_notifier_ctx *ctx = test->priv;
  2040. struct clk_hw *hw = &ctx->mux_ctx.hw;
  2041. struct clk *clk = clk_hw_get_clk(hw, NULL);
  2042. struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
  2043. int ret;
  2044. ret = clk_set_parent(clk, new_parent);
  2045. KUNIT_ASSERT_EQ(test, ret, 0);
  2046. ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
  2047. ctx->pre_rate_change.done,
  2048. msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
  2049. KUNIT_ASSERT_GT(test, ret, 0);
  2050. KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
  2051. KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
  2052. ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
  2053. ctx->post_rate_change.done,
  2054. msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
  2055. KUNIT_ASSERT_GT(test, ret, 0);
  2056. KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
  2057. KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
  2058. clk_put(new_parent);
  2059. clk_put(clk);
  2060. }
  2061. static struct kunit_case clk_mux_notifier_test_cases[] = {
  2062. KUNIT_CASE(clk_mux_notifier_set_parent_test),
  2063. {}
  2064. };
  2065. /*
  2066. * Test suite for a mux with multiple parents, and a notifier registered
  2067. * on the mux.
  2068. *
  2069. * These tests exercise the behaviour of notifiers.
  2070. */
  2071. static struct kunit_suite clk_mux_notifier_test_suite = {
  2072. .name = "clk-mux-notifier",
  2073. .init = clk_mux_notifier_test_init,
  2074. .exit = clk_mux_notifier_test_exit,
  2075. .test_cases = clk_mux_notifier_test_cases,
  2076. };
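/*
 * Register two dummy rate parents running at DUMMY_CLOCK_RATE_1 and
 * DUMMY_CLOCK_RATE_2, and a mux using the no-reparent ops with parent-0
 * selected initially.
 */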
  2077. static int
  2078. clk_mux_no_reparent_test_init(struct kunit *test)
  2079. {
  2080. struct clk_multiple_parent_ctx *ctx;
2081. const char *parents[2] = { "parent-0", "parent-1" };
  2082. int ret;
  2083. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  2084. if (!ctx)
  2085. return -ENOMEM;
  2086. test->priv = ctx;
  2087. ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
  2088. &clk_dummy_rate_ops,
  2089. 0);
  2090. ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
  2091. ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
  2092. if (ret)
  2093. return ret;
  2094. ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
  2095. &clk_dummy_rate_ops,
  2096. 0);
  2097. ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
  2098. ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
  2099. if (ret)
  2100. return ret;
  2101. ctx->current_parent = 0;
  2102. ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
  2103. &clk_multiple_parents_no_reparent_mux_ops,
  2104. 0);
  2105. ret = clk_hw_register(NULL, &ctx->hw);
  2106. if (ret)
  2107. return ret;
  2108. return 0;
  2109. }
  2110. static void
  2111. clk_mux_no_reparent_test_exit(struct kunit *test)
  2112. {
  2113. struct clk_multiple_parent_ctx *ctx = test->priv;
  2114. clk_hw_unregister(&ctx->hw);
  2115. clk_hw_unregister(&ctx->parents_ctx[0].hw);
  2116. clk_hw_unregister(&ctx->parents_ctx[1].hw);
  2117. }
  2118. /*
2119. * Test that if we have a mux that cannot change parent and we call
  2120. * clk_round_rate() on it with a rate that should cause it to change
  2121. * parent, it won't.
  2122. */
  2123. static void clk_mux_no_reparent_round_rate(struct kunit *test)
  2124. {
  2125. struct clk_multiple_parent_ctx *ctx = test->priv;
  2126. struct clk_hw *hw = &ctx->hw;
  2127. struct clk *clk = clk_hw_get_clk(hw, NULL);
  2128. struct clk *other_parent, *parent;
  2129. unsigned long other_parent_rate;
  2130. unsigned long parent_rate;
  2131. long rounded_rate;
  2132. parent = clk_get_parent(clk);
  2133. KUNIT_ASSERT_PTR_NE(test, parent, NULL);
  2134. parent_rate = clk_get_rate(parent);
  2135. KUNIT_ASSERT_GT(test, parent_rate, 0);
  2136. other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
  2137. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
  2138. KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
  2139. other_parent_rate = clk_get_rate(other_parent);
  2140. KUNIT_ASSERT_GT(test, other_parent_rate, 0);
  2141. clk_put(other_parent);
  2142. rounded_rate = clk_round_rate(clk, other_parent_rate);
  2143. KUNIT_ASSERT_GT(test, rounded_rate, 0);
  2144. KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);
  2145. clk_put(clk);
  2146. }
  2147. /*
2148. * Test that if we have a mux that cannot change parent and we call
  2149. * clk_set_rate() on it with a rate that should cause it to change
  2150. * parent, it won't.
  2151. */
  2152. static void clk_mux_no_reparent_set_rate(struct kunit *test)
  2153. {
  2154. struct clk_multiple_parent_ctx *ctx = test->priv;
  2155. struct clk_hw *hw = &ctx->hw;
  2156. struct clk *clk = clk_hw_get_clk(hw, NULL);
  2157. struct clk *other_parent, *parent;
  2158. unsigned long other_parent_rate;
  2159. unsigned long parent_rate;
  2160. unsigned long rate;
  2161. int ret;
  2162. parent = clk_get_parent(clk);
  2163. KUNIT_ASSERT_PTR_NE(test, parent, NULL);
  2164. parent_rate = clk_get_rate(parent);
  2165. KUNIT_ASSERT_GT(test, parent_rate, 0);
  2166. other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
  2167. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
  2168. KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
  2169. other_parent_rate = clk_get_rate(other_parent);
  2170. KUNIT_ASSERT_GT(test, other_parent_rate, 0);
  2171. clk_put(other_parent);
  2172. ret = clk_set_rate(clk, other_parent_rate);
  2173. KUNIT_ASSERT_EQ(test, ret, 0);
  2174. rate = clk_get_rate(clk);
  2175. KUNIT_ASSERT_GT(test, rate, 0);
  2176. KUNIT_EXPECT_EQ(test, rate, parent_rate);
  2177. clk_put(clk);
  2178. }
  2179. static struct kunit_case clk_mux_no_reparent_test_cases[] = {
  2180. KUNIT_CASE(clk_mux_no_reparent_round_rate),
  2181. KUNIT_CASE(clk_mux_no_reparent_set_rate),
  2182. {}
  2183. };
  2184. /*
  2185. * Test suite for a clock mux that isn't allowed to change parent, using
  2186. * the clk_hw_determine_rate_no_reparent() helper.
  2187. *
  2188. * These tests exercise that helper, and the proper selection of
  2189. * rates and parents.
  2190. */
  2191. static struct kunit_suite clk_mux_no_reparent_test_suite = {
  2192. .name = "clk-mux-no-reparent",
  2193. .init = clk_mux_no_reparent_test_init,
  2194. .exit = clk_mux_no_reparent_test_exit,
  2195. .test_cases = clk_mux_no_reparent_test_cases,
  2196. };
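/**
 * struct clk_register_clk_parent_data_test_case - Parameters for the clk_parent_data tests
 * @desc: test case description, used as the KUnit parameter name
 * @pdata: clk_parent_data entry used as the single parent of the clk under test
 */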
  2197. struct clk_register_clk_parent_data_test_case {
  2198. const char *desc;
  2199. struct clk_parent_data pdata;
  2200. };
  2201. static void
  2202. clk_register_clk_parent_data_test_case_to_desc(
  2203. const struct clk_register_clk_parent_data_test_case *t, char *desc)
  2204. {
  2205. strcpy(desc, t->desc);
  2206. }
  2207. static const struct clk_register_clk_parent_data_test_case
  2208. clk_register_clk_parent_data_of_cases[] = {
  2209. {
  2210. /*
  2211. * Test that a clk registered with a struct device_node can
  2212. * find a parent based on struct clk_parent_data::index.
  2213. */
  2214. .desc = "clk_parent_data_of_index_test",
  2215. .pdata.index = 0,
  2216. },
  2217. {
  2218. /*
  2219. * Test that a clk registered with a struct device_node can
2220. * find a parent based on struct clk_parent_data::fw_name.
  2221. */
  2222. .desc = "clk_parent_data_of_fwname_test",
  2223. .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
  2224. },
  2225. {
  2226. /*
  2227. * Test that a clk registered with a struct device_node can
  2228. * find a parent based on struct clk_parent_data::name.
  2229. */
  2230. .desc = "clk_parent_data_of_name_test",
  2231. /* The index must be negative to indicate firmware not used */
  2232. .pdata.index = -1,
  2233. .pdata.name = CLK_PARENT_DATA_1MHZ_NAME,
  2234. },
  2235. {
  2236. /*
  2237. * Test that a clk registered with a struct device_node can
  2238. * find a parent based on struct
  2239. * clk_parent_data::{fw_name,name}.
  2240. */
  2241. .desc = "clk_parent_data_of_fwname_name_test",
  2242. .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
  2243. .pdata.name = "not_matching",
  2244. },
  2245. {
  2246. /*
  2247. * Test that a clk registered with a struct device_node can
  2248. * find a parent based on struct clk_parent_data::{index,name}.
  2249. * Index takes priority.
  2250. */
  2251. .desc = "clk_parent_data_of_index_name_priority_test",
  2252. .pdata.index = 0,
  2253. .pdata.name = "not_matching",
  2254. },
  2255. {
  2256. /*
  2257. * Test that a clk registered with a struct device_node can
  2258. * find a parent based on struct
2259. * clk_parent_data::{index,fw_name,name}. The fw_name takes
  2260. * priority over index and name.
  2261. */
  2262. .desc = "clk_parent_data_of_index_fwname_name_priority_test",
  2263. .pdata.index = 1,
  2264. .pdata.fw_name = CLK_PARENT_DATA_PARENT1,
  2265. .pdata.name = "not_matching",
  2266. },
  2267. };
  2268. KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_of_test, clk_register_clk_parent_data_of_cases,
  2269. clk_register_clk_parent_data_test_case_to_desc)
  2270. /**
  2271. * struct clk_register_clk_parent_data_of_ctx - Context for clk_parent_data OF tests
  2272. * @np: device node of clk under test
  2273. * @hw: clk_hw for clk under test
  2274. */
  2275. struct clk_register_clk_parent_data_of_ctx {
  2276. struct device_node *np;
  2277. struct clk_hw hw;
  2278. };
  2279. static int clk_register_clk_parent_data_of_test_init(struct kunit *test)
  2280. {
  2281. struct clk_register_clk_parent_data_of_ctx *ctx;
  2282. KUNIT_ASSERT_EQ(test, 0,
  2283. of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
  2284. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  2285. if (!ctx)
  2286. return -ENOMEM;
  2287. test->priv = ctx;
  2288. ctx->np = of_find_compatible_node(NULL, NULL, "test,clk-parent-data");
  2289. if (!ctx->np)
  2290. return -ENODEV;
  2291. of_node_put_kunit(test, ctx->np);
  2292. return 0;
  2293. }
  2294. /*
  2295. * Test that a clk registered with a struct device_node can find a parent based on
  2296. * struct clk_parent_data when the hw member isn't set.
  2297. */
  2298. static void clk_register_clk_parent_data_of_test(struct kunit *test)
  2299. {
  2300. struct clk_register_clk_parent_data_of_ctx *ctx = test->priv;
  2301. struct clk_hw *parent_hw;
  2302. const struct clk_register_clk_parent_data_test_case *test_param;
  2303. struct clk_init_data init = { };
  2304. struct clk *expected_parent, *actual_parent;
  2305. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->np);
  2306. expected_parent = of_clk_get_kunit(test, ctx->np, 0);
  2307. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
  2308. test_param = test->param_value;
  2309. init.parent_data = &test_param->pdata;
  2310. init.num_parents = 1;
  2311. init.name = "parent_data_of_test_clk";
  2312. init.ops = &clk_dummy_single_parent_ops;
  2313. ctx->hw.init = &init;
  2314. KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, ctx->np, &ctx->hw));
  2315. parent_hw = clk_hw_get_parent(&ctx->hw);
  2316. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
  2317. actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
  2318. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
  2319. KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
  2320. }
  2321. static struct kunit_case clk_register_clk_parent_data_of_test_cases[] = {
  2322. KUNIT_CASE_PARAM(clk_register_clk_parent_data_of_test,
  2323. clk_register_clk_parent_data_of_test_gen_params),
  2324. {}
  2325. };
  2326. /*
  2327. * Test suite for registering clks with struct clk_parent_data and a struct
  2328. * device_node.
  2329. */
  2330. static struct kunit_suite clk_register_clk_parent_data_of_suite = {
  2331. .name = "clk_register_clk_parent_data_of",
  2332. .init = clk_register_clk_parent_data_of_test_init,
  2333. .test_cases = clk_register_clk_parent_data_of_test_cases,
  2334. };
  2335. /**
  2336. * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
  2337. * @dev: device of clk under test
  2338. * @hw: clk_hw for clk under test
  2339. * @pdrv: driver to attach to find @dev
  2340. */
  2341. struct clk_register_clk_parent_data_device_ctx {
  2342. struct device *dev;
  2343. struct clk_hw hw;
  2344. struct platform_driver pdrv;
  2345. };
  2346. static inline struct clk_register_clk_parent_data_device_ctx *
  2347. clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
  2348. {
  2349. return container_of(to_platform_driver(pdev->dev.driver),
  2350. struct clk_register_clk_parent_data_device_ctx, pdrv);
  2351. }
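/* Stash the probed device so the tests can register clks against it. */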
  2352. static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
  2353. {
  2354. struct clk_register_clk_parent_data_device_ctx *ctx;
  2355. ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
  2356. ctx->dev = &pdev->dev;
  2357. return 0;
  2358. }
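/*
 * Bind a platform driver to the "test,clk-parent-data" node added by the
 * overlay, so the test gets a struct device to register clks with.
 */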
  2359. static void clk_register_clk_parent_data_device_driver(struct kunit *test)
  2360. {
  2361. struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
  2362. static const struct of_device_id match_table[] = {
  2363. { .compatible = "test,clk-parent-data" },
  2364. { }
  2365. };
  2366. ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
  2367. ctx->pdrv.driver.of_match_table = match_table;
  2368. ctx->pdrv.driver.name = __func__;
  2369. ctx->pdrv.driver.owner = THIS_MODULE;
  2370. KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
  2371. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
  2372. }
  2373. static const struct clk_register_clk_parent_data_test_case
  2374. clk_register_clk_parent_data_device_cases[] = {
  2375. {
  2376. /*
  2377. * Test that a clk registered with a struct device can find a
  2378. * parent based on struct clk_parent_data::index.
  2379. */
  2380. .desc = "clk_parent_data_device_index_test",
  2381. .pdata.index = 1,
  2382. },
  2383. {
  2384. /*
  2385. * Test that a clk registered with a struct device can find a
2386. * parent based on struct clk_parent_data::fw_name.
  2387. */
  2388. .desc = "clk_parent_data_device_fwname_test",
  2389. .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
  2390. },
  2391. {
  2392. /*
  2393. * Test that a clk registered with a struct device can find a
  2394. * parent based on struct clk_parent_data::name.
  2395. */
  2396. .desc = "clk_parent_data_device_name_test",
  2397. /* The index must be negative to indicate firmware not used */
  2398. .pdata.index = -1,
  2399. .pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
  2400. },
  2401. {
  2402. /*
  2403. * Test that a clk registered with a struct device can find a
  2404. * parent based on struct clk_parent_data::{fw_name,name}.
  2405. */
  2406. .desc = "clk_parent_data_device_fwname_name_test",
  2407. .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
  2408. .pdata.name = "not_matching",
  2409. },
  2410. {
  2411. /*
  2412. * Test that a clk registered with a struct device can find a
  2413. * parent based on struct clk_parent_data::{index,name}. Index
  2414. * takes priority.
  2415. */
  2416. .desc = "clk_parent_data_device_index_name_priority_test",
  2417. .pdata.index = 1,
  2418. .pdata.name = "not_matching",
  2419. },
  2420. {
  2421. /*
  2422. * Test that a clk registered with a struct device can find a
2423. * parent based on struct clk_parent_data::{index,fw_name,name}.
  2424. * The fw_name takes priority over index and name.
  2425. */
  2426. .desc = "clk_parent_data_device_index_fwname_name_priority_test",
  2427. .pdata.index = 0,
  2428. .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
  2429. .pdata.name = "not_matching",
  2430. },
  2431. };
  2432. KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
  2433. clk_register_clk_parent_data_device_cases,
  2434. clk_register_clk_parent_data_test_case_to_desc)
  2435. /*
  2436. * Test that a clk registered with a struct device can find a parent based on
  2437. * struct clk_parent_data when the hw member isn't set.
  2438. */
  2439. static void clk_register_clk_parent_data_device_test(struct kunit *test)
  2440. {
  2441. struct clk_register_clk_parent_data_device_ctx *ctx;
  2442. const struct clk_register_clk_parent_data_test_case *test_param;
  2443. struct clk_hw *parent_hw;
  2444. struct clk_init_data init = { };
  2445. struct clk *expected_parent, *actual_parent;
  2446. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  2447. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
  2448. test->priv = ctx;
  2449. clk_register_clk_parent_data_device_driver(test);
  2450. expected_parent = clk_get_kunit(test, ctx->dev, "50");
  2451. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
  2452. test_param = test->param_value;
  2453. init.parent_data = &test_param->pdata;
  2454. init.num_parents = 1;
  2455. init.name = "parent_data_device_test_clk";
  2456. init.ops = &clk_dummy_single_parent_ops;
  2457. ctx->hw.init = &init;
  2458. KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
  2459. parent_hw = clk_hw_get_parent(&ctx->hw);
  2460. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
  2461. actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
  2462. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
  2463. KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
  2464. }
  2465. static const struct clk_register_clk_parent_data_test_case
  2466. clk_register_clk_parent_data_device_hw_cases[] = {
  2467. {
  2468. /*
  2469. * Test that a clk registered with a struct device can find a
  2470. * parent based on struct clk_parent_data::hw.
  2471. */
  2472. .desc = "clk_parent_data_device_hw_index_test",
  2473. /* The index must be negative to indicate firmware not used */
  2474. .pdata.index = -1,
  2475. },
  2476. {
  2477. /*
  2478. * Test that a clk registered with a struct device can find a
  2479. * parent based on struct clk_parent_data::hw when
  2480. * struct clk_parent_data::fw_name is set.
  2481. */
  2482. .desc = "clk_parent_data_device_hw_fwname_test",
  2483. .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
  2484. },
  2485. {
  2486. /*
  2487. * Test that a clk registered with a struct device can find a
  2488. * parent based on struct clk_parent_data::hw when struct
  2489. * clk_parent_data::name is set.
  2490. */
  2491. .desc = "clk_parent_data_device_hw_name_test",
  2492. /* The index must be negative to indicate firmware not used */
  2493. .pdata.index = -1,
  2494. .pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
  2495. },
  2496. {
  2497. /*
  2498. * Test that a clk registered with a struct device can find a
  2499. * parent based on struct clk_parent_data::hw when struct
  2500. * clk_parent_data::{fw_name,name} are set.
  2501. */
  2502. .desc = "clk_parent_data_device_hw_fwname_name_test",
  2503. .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
  2504. .pdata.name = "not_matching",
  2505. },
  2506. {
  2507. /*
  2508. * Test that a clk registered with a struct device can find a
  2509. * parent based on struct clk_parent_data::hw when struct
  2510. * clk_parent_data::index is set. The hw pointer takes
  2511. * priority.
  2512. */
  2513. .desc = "clk_parent_data_device_hw_index_priority_test",
  2514. .pdata.index = 0,
  2515. },
  2516. {
  2517. /*
  2518. * Test that a clk registered with a struct device can find a
  2519. * parent based on struct clk_parent_data::hw when
2520. * struct clk_parent_data::{index,fw_name,name} are set.
  2521. * The hw pointer takes priority over everything else.
  2522. */
  2523. .desc = "clk_parent_data_device_hw_index_fwname_name_priority_test",
  2524. .pdata.index = 0,
  2525. .pdata.fw_name = CLK_PARENT_DATA_PARENT2,
  2526. .pdata.name = "not_matching",
  2527. },
  2528. };
  2529. KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
  2530. clk_register_clk_parent_data_device_hw_cases,
  2531. clk_register_clk_parent_data_test_case_to_desc)
  2532. /*
  2533. * Test that a clk registered with a struct device can find a
  2534. * parent based on struct clk_parent_data::hw.
  2535. */
  2536. static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
  2537. {
  2538. struct clk_register_clk_parent_data_device_ctx *ctx;
  2539. const struct clk_register_clk_parent_data_test_case *test_param;
  2540. struct clk_dummy_context *parent;
  2541. struct clk_hw *parent_hw;
  2542. struct clk_parent_data pdata = { };
  2543. struct clk_init_data init = { };
  2544. ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
  2545. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
  2546. test->priv = ctx;
  2547. clk_register_clk_parent_data_device_driver(test);
  2548. parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
  2549. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
  2550. parent_hw = &parent->hw;
  2551. parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
  2552. &clk_dummy_rate_ops, 0);
  2553. KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
  2554. test_param = test->param_value;
  2555. memcpy(&pdata, &test_param->pdata, sizeof(pdata));
  2556. pdata.hw = parent_hw;
  2557. init.parent_data = &pdata;
  2558. init.num_parents = 1;
  2559. init.ops = &clk_dummy_single_parent_ops;
  2560. init.name = "parent_data_device_hw_test_clk";
  2561. ctx->hw.init = &init;
  2562. KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
  2563. KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
  2564. }
  2565. static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
  2566. KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_test,
  2567. clk_register_clk_parent_data_device_test_gen_params),
  2568. KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_hw_test,
  2569. clk_register_clk_parent_data_device_hw_test_gen_params),
  2570. {}
  2571. };
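/* Apply the clk_parent_data test overlay before each test in the suite. */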
  2572. static int clk_register_clk_parent_data_device_init(struct kunit *test)
  2573. {
  2574. KUNIT_ASSERT_EQ(test, 0,
  2575. of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
  2576. return 0;
  2577. }
  2578. /*
  2579. * Test suite for registering clks with struct clk_parent_data and a struct
  2580. * device.
  2581. */
  2582. static struct kunit_suite clk_register_clk_parent_data_device_suite = {
  2583. .name = "clk_register_clk_parent_data_device",
  2584. .init = clk_register_clk_parent_data_device_init,
  2585. .test_cases = clk_register_clk_parent_data_device_test_cases,
  2586. };
  2587. kunit_test_suites(
  2588. &clk_leaf_mux_set_rate_parent_test_suite,
  2589. &clk_test_suite,
  2590. &clk_multiple_parents_mux_test_suite,
  2591. &clk_mux_no_reparent_test_suite,
  2592. &clk_mux_notifier_test_suite,
  2593. &clk_orphan_transparent_multiple_parent_mux_test_suite,
  2594. &clk_orphan_transparent_single_parent_test_suite,
  2595. &clk_orphan_two_level_root_last_test_suite,
  2596. &clk_range_test_suite,
  2597. &clk_range_maximize_test_suite,
  2598. &clk_range_minimize_test_suite,
  2599. &clk_register_clk_parent_data_of_suite,
  2600. &clk_register_clk_parent_data_device_suite,
  2601. &clk_single_parent_mux_test_suite,
  2602. &clk_uncached_test_suite,
  2603. );
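/*
 * These suites are typically run under UML via the KUnit wrapper, for
 * example with something like:
 *
 *   ./tools/testing/kunit/kunit.py run 'clk*'
 *
 * The exact invocation (and the Kconfig option gating this file) may
 * vary between trees.
 */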
  2604. MODULE_DESCRIPTION("Kunit tests for clk framework");
  2605. MODULE_LICENSE("GPL v2");