of_reserved_mem.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>
#include <linux/dma-map-ops.h>

#include "of_private.h"
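
/* Regions collected from the /reserved-memory node during early boot. */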
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
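
/*
 * early_init_dt_alloc_reserved_memory_arch() - allocate memory from memblock
 * within [start, end), honouring the requested alignment. The allocation is
 * rolled back if a requested nomap marking fails.
 */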
static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	if (!err)
		kmemleak_ignore_phys(base);

	return err;
}

static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);

/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
					      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	/* Call the region specific initialization function */
	fdt_init_reserved_mem_node(rmem);

	reserved_mem_count++;
}
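
/*
 * early_init_dt_reserve_memory() - reserve a statically-placed region, or
 * mark it nomap when the node carries the 'no-map' property.
 */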
static int __init early_init_dt_reserve_memory(phys_addr_t base,
					       phys_addr_t size, bool nomap)
{
	if (nomap) {
		/*
		 * If the memory is already reserved (by another region), we
		 * should not allow it to be marked nomap, but don't worry
		 * if the region isn't memory as it won't be mapped.
		 */
		if (memblock_overlaps_region(&memblock.memory, base, size) &&
		    memblock_is_region_reserved(base, size))
			return -EBUSY;

		return memblock_mark_nomap(base, size);
	}

	return memblock_reserve(base, size);
}

/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	bool nomap;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
			/* Architecture specific contiguous memory fixup. */
			if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
			    of_get_flat_dt_prop(node, "reusable", NULL))
				dma_contiguous_early_fixup(base, size);
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
		} else {
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));
		}

		len -= t_len;
	}
	return 0;
}

/*
 * __reserved_mem_check_root() - check that the #size-cells and #address-cells
 * provided in /reserved-memory match the values supported by the current
 * implementation, and that a 'ranges' property has been provided
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;

	return 0;
}

static void __init __rmem_check_for_overlap(void);

/**
 * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
 * reserved memory regions.
 *
 * This function is used to scan through the DT and store the
 * information for the reserved memory regions that are defined using
 * the "reg" property. The region node number, name, base address, and
 * size are all stored in the reserved_mem array by calling the
 * fdt_reserved_mem_save_node() function.
 */
void __init fdt_scan_reserved_mem_reg_nodes(void)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	const void *fdt = initial_boot_params;
	phys_addr_t base, size;
	const __be32 *prop;
	int node, child;
	int len;

	if (!fdt)
		return;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0) {
		pr_info("Reserved memory: No reserved-memory node in the DT\n");
		return;
	}

	if (__reserved_mem_check_root(node)) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;

		prop = of_get_flat_dt_prop(child, "reg", &len);
		if (!prop)
			continue;
		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);
		if (len && len % t_len != 0) {
			pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
			       uname);
			continue;
		}

		if (len > t_len)
			pr_warn("%s() ignores %d regions in node '%s'\n",
				__func__, len / t_len - 1, uname);

		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size)
			fdt_reserved_mem_save_node(child, uname, base, size);
	}

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();
}

static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);

/*
 * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
 */
int __init fdt_scan_reserved_mem(void)
{
	int node, child;
	int dynamic_nodes_cnt = 0;
	int dynamic_nodes[MAX_RESERVED_REGIONS];
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0)
		return -ENODEV;

	if (__reserved_mem_check_root(node) != 0) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		int err;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);

		err = __reserved_mem_reserve_reg(child, uname);

		/*
		 * Save the nodes for the dynamically-placed regions
		 * into an array which will be used for allocation right
		 * after all the statically-placed regions are reserved
		 * or marked as no-map. This is done to avoid dynamically
		 * allocating from one of the statically-placed regions.
		 */
		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
			dynamic_nodes[dynamic_nodes_cnt] = child;
			dynamic_nodes_cnt++;
		}
	}

	for (int i = 0; i < dynamic_nodes_cnt; i++) {
		const char *uname;

		child = dynamic_nodes[i];
		uname = fdt_get_name(fdt, child, NULL);
		__reserved_mem_alloc_size(child, uname);
	}

	return 0;
}

/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	                 -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {
		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					 uname, &base,
					 (unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}
	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}

	/* Architecture specific contiguous memory fixup. */
	if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
	    of_get_flat_dt_prop(node, "reusable", NULL))
		dma_contiguous_early_fixup(base, size);

	/* Save region in the reserved_mem array */
	fdt_reserved_mem_save_node(node, uname, base, size);
	return 0;
}
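
/* Sentinel terminating the table of RESERVEDMEM_OF_DECLARE() entries. */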
static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
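
/* sort() comparator: order by base address, then size, then FDT node offset. */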
static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}
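
/*
 * __rmem_check_for_overlap() - sort the reserved_mem table by base address
 * and warn about any neighbouring regions whose ranges intersect.
 */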
static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem_node() - Initialize a reserved memory region
 * @rmem: reserved_mem struct of the memory region to be initialized.
 *
 * This function is used to call the region specific initialization
 * function for a reserved memory region.
 */
static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	int err = 0;
	bool nomap;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	err = __reserved_mem_init_node(rmem);
	if (err != 0 && err != -ENOENT) {
		pr_info("node %s compatible matching failed\n", rmem->name);
		if (nomap)
			memblock_clear_nomap(rmem->base, rmem->size);
		else
			memblock_phys_free(rmem->base, rmem->size);
	} else {
		phys_addr_t end = rmem->base + rmem->size - 1;
		bool reusable =
			(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
			nomap ? "nomap" : "map",
			reusable ? "reusable" : "non-reusable",
			rmem->name ? rmem->name : "unknown");
	}
}
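
/* Bookkeeping for reserved regions that have been assigned to a device. */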
struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *	given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'reserved-memory' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on reserved
 * memory region specified by 'memory-region' property in @np node to the @dev
 * device. When a driver needs to use more than one reserved memory region, it
 * should allocate child devices and initialize regions by name for each
 * child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = of_reserved_mem_lookup(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *	to given device
 * @dev:	pointer to the device to configure
 * @np:		pointer to the device node with 'memory-region' property
 * @name:	name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);