core.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Reset Controller framework
 *
 * Copyright 2013 Philipp Zabel, Pengutronix
 */
#include <linux/atomic.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

static DEFINE_MUTEX(reset_list_mutex);
static LIST_HEAD(reset_controller_list);

static DEFINE_MUTEX(reset_lookup_mutex);
static LIST_HEAD(reset_lookup_list);

/* Protects reset_gpio_lookup_list */
static DEFINE_MUTEX(reset_gpio_lookup_mutex);
static LIST_HEAD(reset_gpio_lookup_list);
static DEFINE_IDA(reset_gpio_ida);

/**
 * struct reset_control - a reset control
 * @rcdev: a pointer to the reset controller device
 *         this reset control belongs to
 * @list: list entry for the rcdev's reset controller list
 * @id: ID of the reset controller in the reset
 *      controller device
 * @refcnt: Number of gets of this reset_control
 * @acquired: Only one reset_control may be acquired for a given rcdev and id.
 * @shared: Is this a shared (1), or an exclusive (0) reset_control?
 * @array: Is this an array of reset controls (1)?
 * @deassert_count: Number of times this reset line has been deasserted
 * @triggered_count: Number of times this reset line has been reset. Currently
 *                   only used for shared resets, which means that the value
 *                   will be either 0 or 1.
 */
struct reset_control {
	struct reset_controller_dev *rcdev;
	struct list_head list;
	unsigned int id;
	struct kref refcnt;
	bool acquired;
	bool shared;
	bool array;
	atomic_t deassert_count;
	atomic_t triggered_count;
};

/**
 * struct reset_control_array - an array of reset controls
 * @base: reset control for compatibility with reset control API functions
 * @num_rstcs: number of reset controls
 * @rstc: array of reset controls
 */
struct reset_control_array {
	struct reset_control base;
	unsigned int num_rstcs;
	struct reset_control *rstc[] __counted_by(num_rstcs);
};

/**
 * struct reset_gpio_lookup - lookup key for ad-hoc created reset-gpio devices
 * @of_args: phandle to the reset controller with all the args like GPIO number
 * @list: list entry for the reset_gpio_lookup_list
 */
struct reset_gpio_lookup {
	struct of_phandle_args of_args;
	struct list_head list;
};

static const char *rcdev_name(struct reset_controller_dev *rcdev)
{
	if (rcdev->dev)
		return dev_name(rcdev->dev);

	if (rcdev->of_node)
		return rcdev->of_node->full_name;

	if (rcdev->of_args)
		return rcdev->of_args->np->full_name;

	return NULL;
}

/**
 * of_reset_simple_xlate - translate reset_spec to the reset line number
 * @rcdev: a pointer to the reset controller device
 * @reset_spec: reset line specifier as found in the device tree
 *
 * This static translation function is used by default if of_xlate in
 * :c:type:`reset_controller_dev` is not set. It is useful for all reset
 * controllers with 1:1 mapping, where reset lines can be indexed by number
 * without gaps.
 */
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	if (reset_spec->args[0] >= rcdev->nr_resets)
		return -EINVAL;

	return reset_spec->args[0];
}

/**
 * reset_controller_register - register a reset controller device
 * @rcdev: a pointer to the initialized reset controller device
 */
int reset_controller_register(struct reset_controller_dev *rcdev)
{
	if (rcdev->of_node && rcdev->of_args)
		return -EINVAL;

	if (!rcdev->of_xlate) {
		rcdev->of_reset_n_cells = 1;
		rcdev->of_xlate = of_reset_simple_xlate;
	}

	INIT_LIST_HEAD(&rcdev->reset_control_head);

	mutex_lock(&reset_list_mutex);
	list_add(&rcdev->list, &reset_controller_list);
	mutex_unlock(&reset_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(reset_controller_register);
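
/*
 * Example (illustrative only, not part of the framework): a minimal provider
 * sketch registering a reset controller from a hypothetical "foo" driver. The
 * foo_reset_* names, foo_reset_ops and the foo_assert()/foo_deassert()
 * callbacks are assumptions for illustration; nr_resets and of_node come from
 * the driver's own data and device tree node.
 *
 *	static const struct reset_control_ops foo_reset_ops = {
 *		.assert   = foo_assert,
 *		.deassert = foo_deassert,
 *	};
 *
 *	static int foo_reset_probe(struct platform_device *pdev)
 *	{
 *		struct reset_controller_dev *rcdev;
 *
 *		rcdev = devm_kzalloc(&pdev->dev, sizeof(*rcdev), GFP_KERNEL);
 *		if (!rcdev)
 *			return -ENOMEM;
 *
 *		rcdev->owner = THIS_MODULE;
 *		rcdev->ops = &foo_reset_ops;
 *		rcdev->of_node = pdev->dev.of_node;
 *		rcdev->nr_resets = 8;
 *
 *		return devm_reset_controller_register(&pdev->dev, rcdev);
 *	}
 */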

/**
 * reset_controller_unregister - unregister a reset controller device
 * @rcdev: a pointer to the reset controller device
 */
void reset_controller_unregister(struct reset_controller_dev *rcdev)
{
	mutex_lock(&reset_list_mutex);
	list_del(&rcdev->list);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);

static void devm_reset_controller_release(struct device *dev, void *res)
{
	reset_controller_unregister(*(struct reset_controller_dev **)res);
}

/**
 * devm_reset_controller_register - resource managed reset_controller_register()
 * @dev: device that is registering this reset controller
 * @rcdev: a pointer to the initialized reset controller device
 *
 * Managed reset_controller_register(). For reset controllers registered by
 * this function, reset_controller_unregister() is automatically called on
 * driver detach. See reset_controller_register() for more information.
 */
int devm_reset_controller_register(struct device *dev,
				   struct reset_controller_dev *rcdev)
{
	struct reset_controller_dev **rcdevp;
	int ret;

	rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
			      GFP_KERNEL);
	if (!rcdevp)
		return -ENOMEM;

	ret = reset_controller_register(rcdev);
	if (ret) {
		devres_free(rcdevp);
		return ret;
	}

	*rcdevp = rcdev;
	devres_add(dev, rcdevp);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_reset_controller_register);

/**
 * reset_controller_add_lookup - register a set of lookup entries
 * @lookup: array of reset lookup entries
 * @num_entries: number of entries in the lookup array
 */
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
				 unsigned int num_entries)
{
	struct reset_control_lookup *entry;
	unsigned int i;

	mutex_lock(&reset_lookup_mutex);
	for (i = 0; i < num_entries; i++) {
		entry = &lookup[i];

		if (!entry->dev_id || !entry->provider) {
			pr_warn("%s(): reset lookup entry badly specified, skipping\n",
				__func__);
			continue;
		}

		list_add_tail(&entry->list, &reset_lookup_list);
	}
	mutex_unlock(&reset_lookup_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_add_lookup);
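
/*
 * Example (illustrative only): how a board file or MFD driver might hand out
 * reset lines via lookup entries on a non-DT platform. The provider name
 * "foo-reset.0", the consumer "bar-device.0" and the indices are assumptions;
 * the RESET_LOOKUP() initializer comes from <linux/reset-controller.h>.
 *
 *	static struct reset_control_lookup foo_reset_lookups[] = {
 *		RESET_LOOKUP("foo-reset.0", 0, "bar-device.0", NULL),
 *		RESET_LOOKUP("foo-reset.0", 3, "bar-device.0", "phy"),
 *	};
 *
 *	reset_controller_add_lookup(foo_reset_lookups,
 *				    ARRAY_SIZE(foo_reset_lookups));
 */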

static inline struct reset_control_array *
rstc_to_array(struct reset_control *rstc) {
	return container_of(rstc, struct reset_control_array, base);
}

static int reset_control_array_reset(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_reset(resets->rstc[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int reset_control_array_rearm(struct reset_control_array *resets)
{
	struct reset_control *rstc;
	int i;

	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (!rstc)
			continue;

		if (WARN_ON(IS_ERR(rstc)))
			return -EINVAL;

		if (rstc->shared) {
			if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
				return -EINVAL;
		} else {
			if (!rstc->acquired)
				return -EPERM;
		}
	}

	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (rstc && rstc->shared)
			WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	}

	return 0;
}

static int reset_control_array_assert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_assert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_deassert(resets->rstc[i]);
	return ret;
}

static int reset_control_array_deassert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_deassert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_assert(resets->rstc[i]);
	return ret;
}

static int reset_control_array_acquire(struct reset_control_array *resets)
{
	unsigned int i;
	int err;

	for (i = 0; i < resets->num_rstcs; i++) {
		err = reset_control_acquire(resets->rstc[i]);
		if (err < 0)
			goto release;
	}

	return 0;

release:
	while (i--)
		reset_control_release(resets->rstc[i]);

	return err;
}

static void reset_control_array_release(struct reset_control_array *resets)
{
	unsigned int i;

	for (i = 0; i < resets->num_rstcs; i++)
		reset_control_release(resets->rstc[i]);
}

static inline bool reset_control_is_array(struct reset_control *rstc)
{
	return rstc->array;
}

/**
 * reset_control_reset - reset the controlled device
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for the
 * lifetime of the reset_control instance: for all but the first caller this is
 * a no-op.
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_reset(struct reset_control *rstc)
{
	int ret;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_reset(rstc_to_array(rstc));

	if (!rstc->rcdev->ops->reset)
		return -ENOTSUPP;

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		if (atomic_inc_return(&rstc->triggered_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
	if (rstc->shared && ret)
		atomic_dec(&rstc->triggered_count);

	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
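
/*
 * Example (illustrative only): a minimal consumer sketch assuming a
 * hypothetical "bar" platform driver whose reset line is shared with other
 * devices, so the pulse is triggered at most once per instance lifetime.
 *
 *	static int bar_probe(struct platform_device *pdev)
 *	{
 *		struct reset_control *rstc;
 *
 *		rstc = devm_reset_control_get_shared(&pdev->dev, NULL);
 *		if (IS_ERR(rstc))
 *			return PTR_ERR(rstc);
 *
 *		return reset_control_reset(rstc);
 *	}
 */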

/**
 * reset_control_bulk_reset - reset the controlled devices in order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Issue a reset on all provided reset controls, in order.
 *
 * See also: reset_control_reset()
 */
int reset_control_bulk_reset(int num_rstcs,
			     struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_reset(rstcs[i].rstc);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_reset);

/**
 * reset_control_rearm - allow shared reset line to be re-triggered
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for the
 * lifetime of the reset_control instance, except if this call is used.
 *
 * Calls to this function must be balanced with calls to reset_control_reset;
 * a warning is thrown in case triggered_count ever dips below 0.
 *
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset or reset_control_rearm have been used.
 *
 * If rstc is NULL the function will just return 0.
 */
int reset_control_rearm(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_rearm(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_rearm);
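
/*
 * Example (illustrative only): balancing reset_control_reset() with
 * reset_control_rearm() on a shared reset line, for instance across a
 * hypothetical suspend/resume cycle where the consumer wants the pulse to be
 * triggerable again after resume.
 *
 *	err = reset_control_reset(rstc);
 *	if (err)
 *		return err;
 *
 *	... later, once the previous pulse is no longer needed ...
 *
 *	err = reset_control_rearm(rstc);
 *	if (err)
 *		return err;
 *
 *	err = reset_control_reset(rstc);
 */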

/**
 * reset_control_assert - asserts the reset line
 * @rstc: reset controller
 *
 * Calling this on an exclusive reset controller guarantees that the reset
 * will be asserted. When called on a shared reset controller the line may
 * still be deasserted, as long as other users keep it so.
 *
 * For shared reset controls a driver cannot expect the hw's registers and
 * internal state to be reset, but must be prepared for this to happen.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_assert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_assert(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
			return -EINVAL;

		if (atomic_dec_return(&rstc->deassert_count) != 0)
			return 0;

		/*
		 * Shared reset controls allow the reset line to be in any state
		 * after this call, so doing nothing is a valid option.
		 */
		if (!rstc->rcdev->ops->assert)
			return 0;
	} else {
		/*
		 * If the reset controller does not implement .assert(), there
		 * is no way to guarantee that the reset line is asserted after
		 * this call.
		 */
		if (!rstc->rcdev->ops->assert)
			return -ENOTSUPP;

		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_assert);

/**
 * reset_control_bulk_assert - asserts the reset lines in order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Assert the reset lines for all provided reset controls, in order.
 * If an assertion fails, already asserted resets are deasserted again.
 *
 * See also: reset_control_assert()
 */
int reset_control_bulk_assert(int num_rstcs,
			      struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_assert(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_deassert(rstcs[i].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_assert);

/**
 * reset_control_deassert - deasserts the reset line
 * @rstc: reset controller
 *
 * After calling this function, the reset is guaranteed to be deasserted.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_deassert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_deassert(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		if (atomic_inc_return(&rstc->deassert_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	/*
	 * If the reset controller does not implement .deassert(), we assume
	 * that it handles self-deasserting reset lines via .reset(). In that
	 * case, the reset lines are deasserted by default. If that is not the
	 * case, the reset controller driver should implement .deassert() and
	 * return -ENOTSUPP.
	 */
	if (!rstc->rcdev->ops->deassert)
		return 0;

	return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_deassert);
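
/*
 * Example (illustrative only): a consumer holding its dedicated reset line
 * asserted around a reconfiguration step. The exclusive request guarantees
 * that assert/deassert really toggle the line; the names and the 1 ms delay
 * are assumptions for illustration.
 *
 *	rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	err = reset_control_assert(rstc);
 *	if (err)
 *		return err;
 *
 *	usleep_range(1000, 2000);
 *
 *	err = reset_control_deassert(rstc);
 *	if (err)
 *		return err;
 */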

/**
 * reset_control_bulk_deassert - deasserts the reset lines in reverse order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Deassert the reset lines for all provided reset controls, in reverse order.
 * If a deassertion fails, already deasserted resets are asserted again.
 *
 * See also: reset_control_deassert()
 */
int reset_control_bulk_deassert(int num_rstcs,
				struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = num_rstcs - 1; i >= 0; i--) {
		ret = reset_control_deassert(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i < num_rstcs)
		reset_control_assert(rstcs[i++].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_deassert);
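
/*
 * Example (illustrative only): a sketch of the bulk API for a hypothetical
 * consumer with three named reset lines. The "axi", "ahb" and "phy" names are
 * assumptions; the bulk helpers store the reset controls back into the same
 * reset_control_bulk_data array that carried the ids.
 *
 *	struct reset_control_bulk_data resets[] = {
 *		{ .id = "axi" },
 *		{ .id = "ahb" },
 *		{ .id = "phy" },
 *	};
 *
 *	err = devm_reset_control_bulk_get_exclusive(&pdev->dev,
 *						    ARRAY_SIZE(resets), resets);
 *	if (err)
 *		return err;
 *
 *	err = reset_control_bulk_assert(ARRAY_SIZE(resets), resets);
 *	...
 *	err = reset_control_bulk_deassert(ARRAY_SIZE(resets), resets);
 */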

/**
 * reset_control_status - returns a negative errno if not supported, a
 * positive value if the reset line is asserted, or zero if the reset
 * line is not asserted or if the desc is NULL (optional reset).
 * @rstc: reset controller
 */
int reset_control_status(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)) || reset_control_is_array(rstc))
		return -EINVAL;

	if (rstc->rcdev->ops->status)
		return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(reset_control_status);
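
/*
 * Example (illustrative only): checking the line state after deassert, for a
 * controller whose driver implements .status(). Negative, zero and positive
 * returns must all be handled, as described above; the dev pointer is a
 * hypothetical consumer device.
 *
 *	ret = reset_control_status(rstc);
 *	if (ret < 0)
 *		return ret;
 *	if (ret > 0)
 *		dev_warn(dev, "reset line still asserted\n");
 */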

/**
 * reset_control_acquire() - acquires a reset control for exclusive use
 * @rstc: reset control
 *
 * This is used to explicitly acquire a reset control for exclusive use. Note
 * that exclusive resets are requested as acquired by default. In order for a
 * second consumer to be able to control the reset, the first consumer has to
 * release it first. Typically the easiest way to achieve this is to call
 * reset_control_get_exclusive_released() to obtain an instance of the reset
 * control. Such reset controls are not acquired by default.
 *
 * Consumers implementing shared access to an exclusive reset need to follow
 * a specific protocol in order to work together. Before consumers can change
 * a reset they must acquire exclusive access using reset_control_acquire().
 * After they are done operating the reset, they must release exclusive access
 * with a call to reset_control_release(). As long as one consumer holds
 * exclusive access to the reset, no other consumer will be granted it.
 *
 * See also: reset_control_release()
 */
int reset_control_acquire(struct reset_control *rstc)
{
	struct reset_control *rc;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_acquire(rstc_to_array(rstc));

	mutex_lock(&reset_list_mutex);

	if (rstc->acquired) {
		mutex_unlock(&reset_list_mutex);
		return 0;
	}

	list_for_each_entry(rc, &rstc->rcdev->reset_control_head, list) {
		if (rstc != rc && rstc->id == rc->id) {
			if (rc->acquired) {
				mutex_unlock(&reset_list_mutex);
				return -EBUSY;
			}
		}
	}

	rstc->acquired = true;

	mutex_unlock(&reset_list_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_acquire);

/**
 * reset_control_bulk_acquire - acquires reset controls for exclusive use
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * This is used to explicitly acquire reset controls requested with
 * reset_control_bulk_get_exclusive_released() for temporary exclusive use.
 *
 * See also: reset_control_acquire(), reset_control_bulk_release()
 */
int reset_control_bulk_acquire(int num_rstcs,
			       struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_acquire(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_release(rstcs[i].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_acquire);

/**
 * reset_control_release() - releases exclusive access to a reset control
 * @rstc: reset control
 *
 * Releases exclusive access right to a reset control previously obtained by a
 * call to reset_control_acquire(). Until a consumer calls this function, no
 * other consumers will be granted exclusive access.
 *
 * See also: reset_control_acquire()
 */
void reset_control_release(struct reset_control *rstc)
{
	if (!rstc || WARN_ON(IS_ERR(rstc)))
		return;

	if (reset_control_is_array(rstc))
		reset_control_array_release(rstc_to_array(rstc));
	else
		rstc->acquired = false;
}
EXPORT_SYMBOL_GPL(reset_control_release);
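
/*
 * Example (illustrative only): two cooperating consumers sharing one
 * exclusive reset line via the acquire/release protocol. Each requests the
 * control in the released state and only acquires it around the actual
 * operation; the names are assumptions for illustration.
 *
 *	rstc = devm_reset_control_get_exclusive_released(&pdev->dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	err = reset_control_acquire(rstc);
 *	if (err)
 *		return err;	(-EBUSY while another consumer still holds it)
 *
 *	err = reset_control_reset(rstc);
 *
 *	reset_control_release(rstc);
 */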

/**
 * reset_control_bulk_release() - releases exclusive access to reset controls
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Releases exclusive access right to reset controls previously obtained by a
 * call to reset_control_bulk_acquire().
 *
 * See also: reset_control_release(), reset_control_bulk_acquire()
 */
void reset_control_bulk_release(int num_rstcs,
				struct reset_control_bulk_data *rstcs)
{
	int i;

	for (i = 0; i < num_rstcs; i++)
		reset_control_release(rstcs[i].rstc);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_release);

static struct reset_control *
__reset_control_get_internal(struct reset_controller_dev *rcdev,
			     unsigned int index, bool shared, bool acquired)
{
	struct reset_control *rstc;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
		if (rstc->id == index) {
			/*
			 * Allow creating a secondary exclusive reset_control
			 * that is initially not acquired for an already
			 * controlled reset line.
			 */
			if (!rstc->shared && !shared && !acquired)
				break;

			if (WARN_ON(!rstc->shared || !shared))
				return ERR_PTR(-EBUSY);

			kref_get(&rstc->refcnt);
			return rstc;
		}
	}

	rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
	if (!rstc)
		return ERR_PTR(-ENOMEM);

	if (!try_module_get(rcdev->owner)) {
		kfree(rstc);
		return ERR_PTR(-ENODEV);
	}

	rstc->rcdev = rcdev;
	list_add(&rstc->list, &rcdev->reset_control_head);
	rstc->id = index;
	kref_init(&rstc->refcnt);
	rstc->acquired = acquired;
	rstc->shared = shared;
	get_device(rcdev->dev);

	return rstc;
}

static void __reset_control_release(struct kref *kref)
{
	struct reset_control *rstc = container_of(kref, struct reset_control,
						  refcnt);

	lockdep_assert_held(&reset_list_mutex);

	module_put(rstc->rcdev->owner);

	list_del(&rstc->list);
	put_device(rstc->rcdev->dev);
	kfree(rstc);
}

static void __reset_control_put_internal(struct reset_control *rstc)
{
	lockdep_assert_held(&reset_list_mutex);

	if (IS_ERR_OR_NULL(rstc))
		return;

	kref_put(&rstc->refcnt, __reset_control_release);
}

static int __reset_add_reset_gpio_lookup(int id, struct device_node *np,
					 unsigned int gpio,
					 unsigned int of_flags)
{
	const struct fwnode_handle *fwnode = of_fwnode_handle(np);
	unsigned int lookup_flags;
	const char *label_tmp;

	/*
	 * Later we map GPIO flags between OF and Linux, however not all
	 * constants from include/dt-bindings/gpio/gpio.h and
	 * include/linux/gpio/machine.h match each other.
	 */
	if (of_flags > GPIO_ACTIVE_LOW) {
		pr_err("reset-gpio code does not support GPIO flags %u for GPIO %u\n",
		       of_flags, gpio);
		return -EINVAL;
	}

	struct gpio_device *gdev __free(gpio_device_put) = gpio_device_find_by_fwnode(fwnode);
	if (!gdev)
		return -EPROBE_DEFER;

	label_tmp = gpio_device_get_label(gdev);
	if (!label_tmp)
		return -EINVAL;

	char *label __free(kfree) = kstrdup(label_tmp, GFP_KERNEL);
	if (!label)
		return -ENOMEM;

	/* Size: one lookup entry plus sentinel */
	struct gpiod_lookup_table *lookup __free(kfree) = kzalloc(struct_size(lookup, table, 2),
								  GFP_KERNEL);
	if (!lookup)
		return -ENOMEM;

	lookup->dev_id = kasprintf(GFP_KERNEL, "reset-gpio.%d", id);
	if (!lookup->dev_id)
		return -ENOMEM;

	lookup_flags = GPIO_PERSISTENT;
	lookup_flags |= of_flags & GPIO_ACTIVE_LOW;
	lookup->table[0] = GPIO_LOOKUP(no_free_ptr(label), gpio, "reset",
				       lookup_flags);

	/* Not freed on success, because it is persistent subsystem data. */
	gpiod_add_lookup_table(no_free_ptr(lookup));

	return 0;
}

/*
 * @args: phandle to the GPIO provider with all the args like GPIO number
 */
static int __reset_add_reset_gpio_device(const struct of_phandle_args *args)
{
	struct reset_gpio_lookup *rgpio_dev;
	struct platform_device *pdev;
	int id, ret;

	/*
	 * Currently only #gpio-cells=2 is supported with the meaning of:
	 * args[0]: GPIO number
	 * args[1]: GPIO flags
	 * TODO: Handle other cases.
	 */
	if (args->args_count != 2)
		return -ENOENT;

	/*
	 * Registering the reset-gpio device might cause an immediate bind,
	 * resulting in its probe() registering a new reset controller and thus
	 * taking the reset_list_mutex lock via reset_controller_register().
	 */
	lockdep_assert_not_held(&reset_list_mutex);

	guard(mutex)(&reset_gpio_lookup_mutex);

	list_for_each_entry(rgpio_dev, &reset_gpio_lookup_list, list) {
		if (args->np == rgpio_dev->of_args.np) {
			if (of_phandle_args_equal(args, &rgpio_dev->of_args))
				return 0; /* Already on the list, done */
		}
	}

	id = ida_alloc(&reset_gpio_ida, GFP_KERNEL);
	if (id < 0)
		return id;

	/* Not freed on success, because it is persistent subsystem data. */
	rgpio_dev = kzalloc(sizeof(*rgpio_dev), GFP_KERNEL);
	if (!rgpio_dev) {
		ret = -ENOMEM;
		goto err_ida_free;
	}

	ret = __reset_add_reset_gpio_lookup(id, args->np, args->args[0],
					    args->args[1]);
	if (ret < 0)
		goto err_kfree;

	rgpio_dev->of_args = *args;
	/*
	 * We keep the device_node reference, but of_args.np is put at the end
	 * of __of_reset_control_get(), so get it one more time.
	 * Hold reference as long as rgpio_dev memory is valid.
	 */
	of_node_get(rgpio_dev->of_args.np);

	pdev = platform_device_register_data(NULL, "reset-gpio", id,
					     &rgpio_dev->of_args,
					     sizeof(rgpio_dev->of_args));
	ret = PTR_ERR_OR_ZERO(pdev);
	if (ret)
		goto err_put;

	list_add(&rgpio_dev->list, &reset_gpio_lookup_list);

	return 0;

err_put:
	of_node_put(rgpio_dev->of_args.np);
err_kfree:
	kfree(rgpio_dev);
err_ida_free:
	ida_free(&reset_gpio_ida, id);

	return ret;
}

static struct reset_controller_dev *__reset_find_rcdev(const struct of_phandle_args *args,
						       bool gpio_fallback)
{
	struct reset_controller_dev *rcdev;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rcdev, &reset_controller_list, list) {
		if (gpio_fallback) {
			if (rcdev->of_args && of_phandle_args_equal(args,
								    rcdev->of_args))
				return rcdev;
		} else {
			if (args->np == rcdev->of_node)
				return rcdev;
		}
	}

	return NULL;
}

struct reset_control *
__of_reset_control_get(struct device_node *node, const char *id, int index,
		       bool shared, bool optional, bool acquired)
{
	bool gpio_fallback = false;
	struct reset_control *rstc;
	struct reset_controller_dev *rcdev;
	struct of_phandle_args args;
	int rstc_id;
	int ret;

	if (!node)
		return ERR_PTR(-EINVAL);

	if (id) {
		index = of_property_match_string(node,
						 "reset-names", id);
		if (index == -EILSEQ)
			return ERR_PTR(index);
		if (index < 0)
			return optional ? NULL : ERR_PTR(-ENOENT);
	}

	ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
					 index, &args);
	if (ret == -EINVAL)
		return ERR_PTR(ret);
	if (ret) {
		if (!IS_ENABLED(CONFIG_RESET_GPIO))
			return optional ? NULL : ERR_PTR(ret);

		/*
		 * There can be only one reset-gpio for regular devices, so
		 * don't bother with the "reset-gpios" phandle index.
		 */
		ret = of_parse_phandle_with_args(node, "reset-gpios", "#gpio-cells",
						 0, &args);
		if (ret)
			return optional ? NULL : ERR_PTR(ret);

		gpio_fallback = true;

		ret = __reset_add_reset_gpio_device(&args);
		if (ret) {
			rstc = ERR_PTR(ret);
			goto out_put;
		}
	}

	mutex_lock(&reset_list_mutex);
	rcdev = __reset_find_rcdev(&args, gpio_fallback);
	if (!rcdev) {
		rstc = ERR_PTR(-EPROBE_DEFER);
		goto out_unlock;
	}

	if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
		rstc = ERR_PTR(-EINVAL);
		goto out_unlock;
	}

	rstc_id = rcdev->of_xlate(rcdev, &args);
	if (rstc_id < 0) {
		rstc = ERR_PTR(rstc_id);
		goto out_unlock;
	}

	/* reset_list_mutex also protects the rcdev's reset_control list */
	rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);

out_unlock:
	mutex_unlock(&reset_list_mutex);
out_put:
	of_node_put(args.np);

	return rstc;
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);

static struct reset_controller_dev *
__reset_controller_by_name(const char *name)
{
	struct reset_controller_dev *rcdev;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rcdev, &reset_controller_list, list) {
		if (!rcdev->dev)
			continue;

		if (!strcmp(name, dev_name(rcdev->dev)))
			return rcdev;
	}

	return NULL;
}

static struct reset_control *
__reset_control_get_from_lookup(struct device *dev, const char *con_id,
				bool shared, bool optional, bool acquired)
{
	const struct reset_control_lookup *lookup;
	struct reset_controller_dev *rcdev;
	const char *dev_id = dev_name(dev);
	struct reset_control *rstc = NULL;

	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (strcmp(lookup->dev_id, dev_id))
			continue;

		if ((!con_id && !lookup->con_id) ||
		    ((con_id && lookup->con_id) &&
		     !strcmp(con_id, lookup->con_id))) {
			mutex_lock(&reset_list_mutex);
			rcdev = __reset_controller_by_name(lookup->provider);
			if (!rcdev) {
				mutex_unlock(&reset_list_mutex);
				mutex_unlock(&reset_lookup_mutex);
				/* Reset provider may not be ready yet. */
				return ERR_PTR(-EPROBE_DEFER);
			}
			rstc = __reset_control_get_internal(rcdev,
							    lookup->index,
							    shared, acquired);
			mutex_unlock(&reset_list_mutex);
			break;
		}
	}

	mutex_unlock(&reset_lookup_mutex);

	if (!rstc)
		return optional ? NULL : ERR_PTR(-ENOENT);

	return rstc;
}

struct reset_control *__reset_control_get(struct device *dev, const char *id,
					  int index, bool shared, bool optional,
					  bool acquired)
{
	if (WARN_ON(shared && acquired))
		return ERR_PTR(-EINVAL);

	if (dev->of_node)
		return __of_reset_control_get(dev->of_node, id, index, shared,
					      optional, acquired);

	return __reset_control_get_from_lookup(dev, id, shared, optional,
					       acquired);
}
EXPORT_SYMBOL_GPL(__reset_control_get);

int __reset_control_bulk_get(struct device *dev, int num_rstcs,
			     struct reset_control_bulk_data *rstcs,
			     bool shared, bool optional, bool acquired)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		rstcs[i].rstc = __reset_control_get(dev, rstcs[i].id, 0,
						    shared, optional, acquired);
		if (IS_ERR(rstcs[i].rstc)) {
			ret = PTR_ERR(rstcs[i].rstc);
			goto err;
		}
	}

	return 0;

err:
	mutex_lock(&reset_list_mutex);
	while (i--)
		__reset_control_put_internal(rstcs[i].rstc);
	mutex_unlock(&reset_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(__reset_control_bulk_get);

static void reset_control_array_put(struct reset_control_array *resets)
{
	int i;

	mutex_lock(&reset_list_mutex);
	for (i = 0; i < resets->num_rstcs; i++)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);
	kfree(resets);
}

/**
 * reset_control_put - free the reset controller
 * @rstc: reset controller
 */
void reset_control_put(struct reset_control *rstc)
{
	if (IS_ERR_OR_NULL(rstc))
		return;

	if (reset_control_is_array(rstc)) {
		reset_control_array_put(rstc_to_array(rstc));
		return;
	}

	mutex_lock(&reset_list_mutex);
	__reset_control_put_internal(rstc);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);

/**
 * reset_control_bulk_put - free the reset controllers
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 */
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
{
	mutex_lock(&reset_list_mutex);
	while (num_rstcs--)
		__reset_control_put_internal(rstcs[num_rstcs].rstc);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_put);

static void devm_reset_control_release(struct device *dev, void *res)
{
	reset_control_put(*(struct reset_control **)res);
}

struct reset_control *
__devm_reset_control_get(struct device *dev, const char *id, int index,
			 bool shared, bool optional, bool acquired)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);

struct reset_control_bulk_devres {
	int num_rstcs;
	struct reset_control_bulk_data *rstcs;
};

static void devm_reset_control_bulk_release(struct device *dev, void *res)
{
	struct reset_control_bulk_devres *devres = res;

	reset_control_bulk_put(devres->num_rstcs, devres->rstcs);
}

int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
				  struct reset_control_bulk_data *rstcs,
				  bool shared, bool optional, bool acquired)
{
	struct reset_control_bulk_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_reset_control_bulk_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = __reset_control_bulk_get(dev, num_rstcs, rstcs, shared, optional, acquired);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	ptr->num_rstcs = num_rstcs;
	ptr->rstcs = rstcs;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);

/**
 * __device_reset - find reset controller associated with the device
 *                  and perform reset
 * @dev: device to be reset by the controller
 * @optional: whether it is optional to reset the device
 *
 * Convenience wrapper for __reset_control_get() and reset_control_reset().
 * This is useful for the common case of devices with single, dedicated reset
 * lines. _RST firmware method will be called for devices with ACPI.
 */
int __device_reset(struct device *dev, bool optional)
{
	struct reset_control *rstc;
	int ret;

#ifdef CONFIG_ACPI
	acpi_handle handle = ACPI_HANDLE(dev);

	if (handle) {
		if (!acpi_has_method(handle, "_RST"))
			return optional ? 0 : -ENOENT;
		if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL,
						      NULL)))
			return -EIO;
	}
#endif

	rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_reset(rstc);

	reset_control_put(rstc);

	return ret;
}
EXPORT_SYMBOL_GPL(__device_reset);
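
/*
 * Example (illustrative only): consumers normally use the device_reset() /
 * device_reset_optional() wrappers from <linux/reset.h> rather than calling
 * __device_reset() directly, e.g. early in a hypothetical probe():
 *
 *	err = device_reset_optional(&pdev->dev);
 *	if (err)
 *		return dev_err_probe(&pdev->dev, err, "failed to reset\n");
 */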

/*
 * APIs to manage an array of reset controls.
 */

/**
 * of_reset_control_get_count - Count number of resets available with a device
 *
 * @node: device node that contains 'resets'.
 *
 * Returns positive reset count on success, or error number on failure and
 * on count being zero.
 */
static int of_reset_control_get_count(struct device_node *node)
{
	int count;

	if (!node)
		return -EINVAL;

	count = of_count_phandle_with_args(node, "resets", "#reset-cells");
	if (count == 0)
		count = -ENOENT;

	return count;
}

/**
 * of_reset_control_array_get - Get a list of reset controls using
 *                              device node.
 *
 * @np: device node for the device that requests the reset controls array
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 * @acquired: only one reset control may be acquired for a given controller
 *            and ID
 *
 * Returns pointer to allocated reset_control on success or error on failure
 */
struct reset_control *
of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
			   bool acquired)
{
	struct reset_control_array *resets;
	struct reset_control *rstc;
	int num, i;

	num = of_reset_control_get_count(np);
	if (num < 0)
		return optional ? NULL : ERR_PTR(num);

	resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL);
	if (!resets)
		return ERR_PTR(-ENOMEM);
	resets->num_rstcs = num;

	for (i = 0; i < num; i++) {
		rstc = __of_reset_control_get(np, NULL, i, shared, optional,
					      acquired);
		if (IS_ERR(rstc))
			goto err_rst;
		resets->rstc[i] = rstc;
	}
	resets->base.array = true;

	return &resets->base;

err_rst:
	mutex_lock(&reset_list_mutex);
	while (--i >= 0)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);

	kfree(resets);

	return rstc;
}
EXPORT_SYMBOL_GPL(of_reset_control_array_get);

/**
 * devm_reset_control_array_get - Resource managed reset control array get
 *
 * @dev: device that requests the list of reset controls
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 *
 * The reset control array APIs are intended for a list of resets
 * that just have to be asserted or deasserted, without any
 * requirements on the order.
 *
 * Returns pointer to allocated reset_control on success or error on failure
 */
struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
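
/*
 * Example (illustrative only): grabbing all of a device's resets as a single
 * array control when the order does not matter, using the inline wrapper from
 * <linux/reset.h>; the device is a hypothetical consumer.
 *
 *	rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	err = reset_control_deassert(rstc);	(deasserts every line in the array)
 */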

static int reset_control_get_count_from_lookup(struct device *dev)
{
	const struct reset_control_lookup *lookup;
	const char *dev_id;
	int count = 0;

	if (!dev)
		return -EINVAL;

	dev_id = dev_name(dev);
	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (!strcmp(lookup->dev_id, dev_id))
			count++;
	}

	mutex_unlock(&reset_lookup_mutex);

	if (count == 0)
		count = -ENOENT;

	return count;
}

/**
 * reset_control_get_count - Count number of resets available with a device
 *
 * @dev: device for which to return the number of resets
 *
 * Returns positive reset count on success, or error number on failure and
 * on count being zero.
 */
int reset_control_get_count(struct device *dev)
{
	if (dev->of_node)
		return of_reset_control_get_count(dev->of_node);

	return reset_control_get_count_from_lookup(dev);
}
EXPORT_SYMBOL_GPL(reset_control_get_count);