fdt.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Functions for working with the Flattened Device Tree data format
  4. *
  5. * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
  6. * benh@kernel.crashing.org
  7. */
  8. #define pr_fmt(fmt) "OF: fdt: " fmt
  9. #include <linux/crash_dump.h>
  10. #include <linux/crc32.h>
  11. #include <linux/kernel.h>
  12. #include <linux/initrd.h>
  13. #include <linux/memblock.h>
  14. #include <linux/mutex.h>
  15. #include <linux/of.h>
  16. #include <linux/of_fdt.h>
  17. #include <linux/sizes.h>
  18. #include <linux/string.h>
  19. #include <linux/errno.h>
  20. #include <linux/slab.h>
  21. #include <linux/libfdt.h>
  22. #include <linux/debugfs.h>
  23. #include <linux/serial_core.h>
  24. #include <linux/sysfs.h>
  25. #include <linux/random.h>
  26. #include <asm/setup.h> /* for COMMAND_LINE_SIZE */
  27. #include <asm/page.h>
  28. #include "of_private.h"
  29. /*
  30. * __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by
  31. * cmd_wrap_S_dtb in scripts/Makefile.dtbs
  32. */
  33. extern uint8_t __dtb_empty_root_begin[];
  34. extern uint8_t __dtb_empty_root_end[];
/*
 * of_fdt_limit_memory - limit the number of regions in the /memory node
 * @limit: maximum entries
 *
 * Adjust the flattened device tree to have at most 'limit' number of
 * memory entries in the /memory node. This function may be called
 * any time after initial_boot_param is set.
 */
void __init of_fdt_limit_memory(int limit)
{
	int memory;
	int len;
	const void *val;
	/* Size in bytes of one (address, size) "reg" entry at the root level */
	int cell_size = sizeof(uint32_t)*(dt_root_addr_cells + dt_root_size_cells);

	memory = fdt_path_offset(initial_boot_params, "/memory");
	if (memory > 0) {
		val = fdt_getprop(initial_boot_params, memory, "reg", &len);
		/*
		 * If "reg" is absent, fdt_getprop() stores a negative libfdt
		 * error code in @len, so this comparison is false and no
		 * truncation is attempted.
		 */
		if (len > limit*cell_size) {
			len = limit*cell_size;
			pr_debug("Limiting number of entries to %d\n", limit);
			/* Shrink the property in place within the blob */
			fdt_setprop(initial_boot_params, memory, "reg", val,
					len);
		}
	}
}
  60. bool of_fdt_device_is_available(const void *blob, unsigned long node)
  61. {
  62. const char *status = fdt_getprop(blob, node, "status", NULL);
  63. if (!status)
  64. return true;
  65. if (!strcmp(status, "ok") || !strcmp(status, "okay"))
  66. return true;
  67. return false;
  68. }
  69. static void *unflatten_dt_alloc(void **mem, unsigned long size,
  70. unsigned long align)
  71. {
  72. void *res;
  73. *mem = PTR_ALIGN(*mem, align);
  74. res = *mem;
  75. *mem += size;
  76. return res;
  77. }
/*
 * populate_properties - unflatten the properties of one FDT node into @np
 * @blob:     flattened device tree blob
 * @offset:   node offset within @blob
 * @mem:      allocation cursor (advanced via unflatten_dt_alloc())
 * @np:       struct device_node receiving the property list
 * @nodename: the node's unit name, used to synthesize "name" if missing
 * @dryrun:   if true, only advance @mem to measure required space
 *
 * Property values are NOT copied: struct property entries point directly
 * into @blob, so the blob must outlive the unflattened tree.
 */
static void populate_properties(const void *blob,
				int offset,
				void **mem,
				struct device_node *np,
				const char *nodename,
				bool dryrun)
{
	struct property *pp, **pprev = NULL;
	int cur;
	bool has_name = false;

	pprev = &np->properties;
	for (cur = fdt_first_property_offset(blob, offset);
	     cur >= 0;
	     cur = fdt_next_property_offset(blob, cur)) {
		const __be32 *val;
		const char *pname;
		u32 sz;

		val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
		if (!val) {
			pr_warn("Cannot locate property at 0x%x\n", cur);
			continue;
		}

		if (!pname) {
			pr_warn("Cannot find property name at 0x%x\n", cur);
			continue;
		}

		if (!strcmp(pname, "name"))
			has_name = true;

		/* Reserve space even on dryrun so both passes agree on size */
		pp = unflatten_dt_alloc(mem, sizeof(struct property),
					__alignof__(struct property));
		if (dryrun)
			continue;

		/* We accept flattened tree phandles either in
		 * ePAPR-style "phandle" properties, or the
		 * legacy "linux,phandle" properties. If both
		 * appear and have different values, things
		 * will get weird. Don't do that.
		 */
		if (!strcmp(pname, "phandle") ||
		    !strcmp(pname, "linux,phandle")) {
			if (!np->phandle)
				np->phandle = be32_to_cpup(val);
		}

		/* And we process the "ibm,phandle" property
		 * used in pSeries dynamic device tree
		 * stuff
		 */
		if (!strcmp(pname, "ibm,phandle"))
			np->phandle = be32_to_cpup(val);

		/* name/value point straight into the blob (no copy) */
		pp->name = (char *)pname;
		pp->length = sz;
		pp->value = (__be32 *)val;
		*pprev = pp;
		pprev = &pp->next;
	}

	/* With version 0x10 we may not have the name property,
	 * recreate it here from the unit name if absent
	 */
	if (!has_name) {
		/* ps: start of the base name, pa: '@' before the unit address */
		const char *p = nodename, *ps = p, *pa = NULL;
		int len;

		while (*p) {
			if ((*p) == '@')
				pa = p;
			else if ((*p) == '/')
				ps = p + 1;
			p++;
		}

		/* No '@' after the last '/': the whole tail is the name */
		if (pa < ps)
			pa = p;
		len = (pa - ps) + 1;
		/* Property struct and its NUL-terminated value in one chunk */
		pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
					__alignof__(struct property));
		if (!dryrun) {
			pp->name = "name";
			pp->length = len;
			pp->value = pp + 1;
			*pprev = pp;
			memcpy(pp->value, ps, len - 1);
			((char *)pp->value)[len - 1] = 0;
			pr_debug("fixed up name for %s -> %s\n",
				 nodename, (char *)pp->value);
		}
	}
}
/*
 * populate_node - unflatten one FDT node into a struct device_node
 * @blob:   flattened device tree blob
 * @offset: node offset within @blob
 * @mem:    allocation cursor, advanced by this call
 * @dad:    parent device_node (NULL for the root)
 * @pnp:    out: the newly created node (valid cursor position even on dryrun)
 * @dryrun: if true, only measure space; do not write through @np
 *
 * Return: 0 on success, or the negative libfdt error from fdt_get_name().
 */
static int populate_node(const void *blob,
			 int offset,
			 void **mem,
			 struct device_node *dad,
			 struct device_node **pnp,
			 bool dryrun)
{
	struct device_node *np;
	const char *pathp;
	int len;

	pathp = fdt_get_name(blob, offset, &len);
	if (!pathp) {
		*pnp = NULL;
		/* fdt_get_name() stored a negative error code in @len */
		return len;
	}

	/* Account for the NUL terminator of the node name */
	len++;

	/* Node struct and its full_name string share one allocation */
	np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
				__alignof__(struct device_node));
	if (!dryrun) {
		char *fn;

		of_node_init(np);
		np->full_name = fn = ((char *)np) + sizeof(*np);

		memcpy(fn, pathp, len);

		/* Prepend to the parent's child list (reversed later) */
		if (dad != NULL) {
			np->parent = dad;
			np->sibling = dad->child;
			dad->child = np;
		}
	}

	populate_properties(blob, offset, mem, np, pathp, dryrun);
	if (!dryrun) {
		np->name = of_get_property(np, "name", NULL);

		if (!np->name)
			np->name = "<NULL>";
	}

	*pnp = np;
	return 0;
}
  201. static void reverse_nodes(struct device_node *parent)
  202. {
  203. struct device_node *child, *next;
  204. /* In-depth first */
  205. child = parent->child;
  206. while (child) {
  207. reverse_nodes(child);
  208. child = child->sibling;
  209. }
  210. /* Reverse the nodes in the child list */
  211. child = parent->child;
  212. parent->child = NULL;
  213. while (child) {
  214. next = child->sibling;
  215. child->sibling = parent->child;
  216. parent->child = child;
  217. child = next;
  218. }
  219. }
/**
 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
 * @blob: The parent device tree blob
 * @mem: Memory chunk to use for allocating device nodes and properties;
 *       NULL selects a sizing-only dry run
 * @dad: Parent struct device_node
 * @nodepp: The device_node tree created by the call
 *
 * Return: The size of unflattened device tree or error code
 */
static int unflatten_dt_nodes(const void *blob,
			      void *mem,
			      struct device_node *dad,
			      struct device_node **nodepp)
{
	struct device_node *root;
	int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH	64
	/* nps[d] is the most recent node seen at depth d, i.e. the parent
	 * for any node subsequently found at depth d+1 */
	struct device_node *nps[FDT_MAX_DEPTH];
	void *base = mem;
	bool dryrun = !base;
	int ret;

	if (nodepp)
		*nodepp = NULL;

	/*
	 * We're unflattening device sub-tree if @dad is valid. There are
	 * possibly multiple nodes in the first level of depth. We need
	 * set @depth to 1 to make fdt_next_node() happy as it bails
	 * immediately when negative @depth is found. Otherwise, the device
	 * nodes except the first one won't be unflattened successfully.
	 */
	if (dad)
		depth = initial_depth = 1;

	root = dad;
	nps[depth] = dad;

	for (offset = 0;
	     offset >= 0 && depth >= initial_depth;
	     offset = fdt_next_node(blob, offset, &depth)) {
		/* Too deep: skip the node rather than overflow nps[] */
		if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
			continue;

		/* Without sysfs (OF_KOBJ), don't even create disabled nodes */
		if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
		    !of_fdt_device_is_available(blob, offset))
			continue;

		ret = populate_node(blob, offset, &mem, nps[depth],
				   &nps[depth+1], dryrun);
		if (ret < 0)
			return ret;

		/* First real node becomes the caller-visible tree root */
		if (!dryrun && nodepp && !*nodepp)
			*nodepp = nps[depth+1];
		if (!dryrun && !root)
			root = nps[depth+1];
	}

	if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
		pr_err("Error %d processing FDT\n", offset);
		return -EINVAL;
	}

	/*
	 * Reverse the child list. Some drivers assumes node order matches .dts
	 * node order
	 */
	if (!dryrun)
		reverse_nodes(root);

	return mem - base;
}
/**
 * __unflatten_device_tree - create tree of device_nodes from flat blob
 * @blob: The blob to expand
 * @dad: Parent device node
 * @mynodes: The device_node tree created by the call
 * @dt_alloc: An allocator that provides a virtual address to memory
 * for the resulting tree
 * @detached: if true set OF_DETACHED on @mynodes
 *
 * unflattens a device-tree, creating the tree of struct device_node. It also
 * fills the "name" and "type" pointers of the nodes so the normal device-tree
 * walking functions can be used.
 *
 * Two passes over the blob: a dry run to size the allocation, then the
 * actual unflattening into a single chunk from @dt_alloc. Note that the
 * resulting nodes point into @blob, so the blob must stay alive.
 *
 * Return: NULL on failure or the memory chunk containing the unflattened
 * device tree on success.
 */
void *__unflatten_device_tree(const void *blob,
			      struct device_node *dad,
			      struct device_node **mynodes,
			      void *(*dt_alloc)(u64 size, u64 align),
			      bool detached)
{
	int size;
	void *mem;
	int ret;

	if (mynodes)
		*mynodes = NULL;

	pr_debug(" -> unflatten_device_tree()\n");

	if (!blob) {
		pr_debug("No device tree pointer\n");
		return NULL;
	}

	pr_debug("Unflattening device tree:\n");
	pr_debug("magic: %08x\n", fdt_magic(blob));
	pr_debug("size: %08x\n", fdt_totalsize(blob));
	pr_debug("version: %08x\n", fdt_version(blob));

	if (fdt_check_header(blob)) {
		pr_err("Invalid device tree blob header\n");
		return NULL;
	}

	/* First pass, scan for size */
	size = unflatten_dt_nodes(blob, NULL, dad, NULL);
	if (size <= 0)
		return NULL;

	size = ALIGN(size, 4);
	pr_debug("  size is %d, allocating...\n", size);

	/* Allocate memory for the expanded device tree; the extra 4 bytes
	 * hold an overrun-detection sentinel */
	mem = dt_alloc(size + 4, __alignof__(struct device_node));
	if (!mem)
		return NULL;

	memset(mem, 0, size);

	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);

	pr_debug("  unflattening %p...\n", mem);

	/* Second pass, do actual unflattening */
	ret = unflatten_dt_nodes(blob, mem, dad, mynodes);

	/* If the second pass wrote past the size from the first pass,
	 * the sentinel is clobbered */
	if (be32_to_cpup(mem + size) != 0xdeadbeef)
		pr_warn("End of tree marker overwritten: %08x\n",
			be32_to_cpup(mem + size));

	if (ret <= 0)
		return NULL;

	if (detached && mynodes && *mynodes) {
		of_node_set_flag(*mynodes, OF_DETACHED);
		pr_debug("unflattened tree is detached\n");
	}

	pr_debug(" <- unflatten_device_tree()\n");
	return mem;
}
/*
 * kernel_tree_alloc - dt_alloc callback backed by the slab allocator.
 * @align is unused: kzalloc() already returns memory aligned for any
 * kernel object.
 */
static void *kernel_tree_alloc(u64 size, u64 align)
{
	return kzalloc(size, GFP_KERNEL);
}
/* Serializes concurrent unflattening requests */
static DEFINE_MUTEX(of_fdt_unflatten_mutex);

/**
 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
 * @blob: Flat device tree blob
 * @dad: Parent device node
 * @mynodes: The device tree created by the call
 *
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used.
 *
 * The resulting tree is allocated with kzalloc() and marked OF_DETACHED.
 *
 * Return: NULL on failure or the memory chunk containing the unflattened
 * device tree on success.
 */
void *of_fdt_unflatten_tree(const unsigned long *blob,
			    struct device_node *dad,
			    struct device_node **mynodes)
{
	void *mem;

	mutex_lock(&of_fdt_unflatten_mutex);
	mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
				      true);
	mutex_unlock(&of_fdt_unflatten_mutex);

	return mem;
}
EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
/* Everything below here references initial_boot_params directly. */
int __initdata dt_root_addr_cells;	/* #address-cells of the root node */
int __initdata dt_root_size_cells;	/* #size-cells of the root node */

/* Virtual and physical addresses of the boot-time FDT blob */
void *initial_boot_params __ro_after_init;
phys_addr_t initial_boot_params_pa __ro_after_init;

#ifdef CONFIG_OF_EARLY_FLATTREE

static u32 of_fdt_crc32;
/*
 * fdt_reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about primary kernel's core image and is used by a dump
 * capture kernel to access the system memory on primary kernel.
 */
static void __init fdt_reserve_elfcorehdr(void)
{
	/* Nothing to do unless crash dump support is built in and the
	 * header location was found earlier (elfcorehdr_size != 0) */
	if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
/**
 * early_init_fdt_scan_reserved_mem() - create reserved memory regions
 *
 * This function grabs memory from early allocator for device exclusive use
 * defined in device tree structures. It should be called by arch specific code
 * once the early allocator (i.e. memblock) has been fully activated.
 */
void __init early_init_fdt_scan_reserved_mem(void)
{
	int n;
	u64 base, size;

	if (!initial_boot_params)
		return;

	fdt_scan_reserved_mem();
	fdt_reserve_elfcorehdr();

	/* Process header /memreserve/ fields */
	for (n = 0; ; n++) {
		fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
		/* A zero-sized entry terminates the memreserve list */
		if (!size)
			break;
		memblock_reserve(base, size);
	}
}
/**
 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
 *
 * Prevents the early allocator from handing out the pages occupied by
 * the flattened device tree itself.
 */
void __init early_init_fdt_reserve_self(void)
{
	if (!initial_boot_params)
		return;

	/* Reserve the dtb region */
	memblock_reserve(__pa(initial_boot_params),
			 fdt_totalsize(initial_boot_params));
}
/**
 * of_scan_flat_dt - scan flattened tree blob and call callback on each.
 * @it: callback function; a non-zero return stops the scan
 * @data: context data pointer
 *
 * This function is used to scan the flattened device-tree, it is
 * used to extract the memory information at boot before we can
 * unflatten the tree
 *
 * Return: 0 if the whole tree was visited, otherwise the first non-zero
 * value returned by @it.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	const void *blob = initial_boot_params;
	const char *pathp;
	int offset, rc = 0, depth = -1;

	if (!blob)
		return 0;

	/* Depth-first walk; depth < 0 means we left the root's subtree */
	for (offset = fdt_next_node(blob, -1, &depth);
	     offset >= 0 && depth >= 0 && !rc;
	     offset = fdt_next_node(blob, offset, &depth)) {

		pathp = fdt_get_name(blob, offset, NULL);
		rc = it(offset, pathp, depth, data);
	}
	return rc;
}
/**
 * of_scan_flat_dt_subnodes - scan sub-nodes of a node call callback on each.
 * @parent: parent node
 * @it: callback function; a non-zero return stops the scan
 * @data: context data pointer
 *
 * This function is used to scan sub-nodes of a node.
 *
 * Return: 0 if all direct subnodes were visited, otherwise the first
 * non-zero value returned by @it.
 */
int __init of_scan_flat_dt_subnodes(unsigned long parent,
				    int (*it)(unsigned long node,
					      const char *uname,
					      void *data),
				    void *data)
{
	const void *blob = initial_boot_params;
	int node;

	fdt_for_each_subnode(node, blob, parent) {
		const char *pathp;
		int rc;

		pathp = fdt_get_name(blob, node, NULL);
		rc = it(node, pathp, data);
		if (rc)
			return rc;
	}
	return 0;
}
/**
 * of_get_flat_dt_subnode_by_name - get the subnode by given name
 *
 * @node: the parent node
 * @uname: the name of subnode
 * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
 */
int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
{
	return fdt_subnode_offset(initial_boot_params, node, uname);
}
/*
 * of_get_flat_dt_root - find the root node in the flat blob
 *
 * The root node always lives at offset 0 in a flattened tree.
 */
unsigned long __init of_get_flat_dt_root(void)
{
	return 0;
}
/*
 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
 *
 * This function can be used within scan_flattened_dt callback to get
 * access to properties. Returns a pointer into the blob, or NULL if the
 * property is absent; @size (if non-NULL) receives the length or an error.
 */
const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
				       int *size)
{
	return fdt_getprop(initial_boot_params, node, name, size);
}
  524. /**
  525. * of_fdt_is_compatible - Return true if given node from the given blob has
  526. * compat in its compatible list
  527. * @blob: A device tree blob
  528. * @node: node to test
  529. * @compat: compatible string to compare with compatible list.
  530. *
  531. * Return: a non-zero value on match with smaller values returned for more
  532. * specific compatible values.
  533. */
  534. static int of_fdt_is_compatible(const void *blob,
  535. unsigned long node, const char *compat)
  536. {
  537. const char *cp;
  538. int cplen;
  539. unsigned long l, score = 0;
  540. cp = fdt_getprop(blob, node, "compatible", &cplen);
  541. if (cp == NULL)
  542. return 0;
  543. while (cplen > 0) {
  544. score++;
  545. if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
  546. return score;
  547. l = strlen(cp) + 1;
  548. cp += l;
  549. cplen -= l;
  550. }
  551. return 0;
  552. }
/**
 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
 * @node: node to test
 * @compat: compatible string to compare with compatible list.
 *
 * Return: non-zero match score (smaller = more specific), 0 on no match.
 */
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
	return of_fdt_is_compatible(initial_boot_params, node, compat);
}
  562. /*
  563. * of_flat_dt_match - Return true if node matches a list of compatible values
  564. */
  565. static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
  566. {
  567. unsigned int tmp, score = 0;
  568. if (!compat)
  569. return 0;
  570. while (*compat) {
  571. tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
  572. if (tmp && (score == 0 || (tmp < score)))
  573. score = tmp;
  574. compat++;
  575. }
  576. return score;
  577. }
/*
 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
 */
uint32_t __init of_get_flat_dt_phandle(unsigned long node)
{
	return fdt_get_phandle(initial_boot_params, node);
}
  585. const char * __init of_flat_dt_get_machine_name(void)
  586. {
  587. const char *name;
  588. unsigned long dt_root = of_get_flat_dt_root();
  589. name = of_get_flat_dt_prop(dt_root, "model", NULL);
  590. if (!name)
  591. name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
  592. return name;
  593. }
/**
 * of_flat_dt_match_machine - Iterate match tables to find matching machine.
 *
 * @default_match: A machine specific ptr to return in case of no match.
 * @get_next_compat: callback function to return next compatible match table.
 *
 * Iterate through machine match tables to find the best match for the machine
 * compatible string in the FDT. Lower scores are better (more specific).
 */
const void * __init of_flat_dt_match_machine(const void *default_match,
		const void * (*get_next_compat)(const char * const**))
{
	const void *data = NULL;
	const void *best_data = default_match;
	const char *const *compat;
	unsigned long dt_root;
	/* ~1 so even the worst possible real score still beats it */
	unsigned int best_score = ~1, score = 0;

	dt_root = of_get_flat_dt_root();
	while ((data = get_next_compat(&compat))) {
		score = of_flat_dt_match(dt_root, compat);
		if (score > 0 && score < best_score) {
			best_data = data;
			best_score = score;
		}
	}
	if (!best_data) {
		const char *prop;
		int size;

		/* Nothing matched: dump the board's compatible list to help
		 * diagnose the unsupported machine */
		pr_err("\n unrecognized device tree list:\n[ ");

		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
		if (prop) {
			while (size > 0) {
				printk("'%s' ", prop);
				size -= strlen(prop) + 1;
				prop += strlen(prop) + 1;
			}
		}
		printk("]\n\n");
		return NULL;
	}

	pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());

	return best_data;
}
/*
 * __early_init_dt_declare_initrd - record initrd virtual addresses
 * @start: physical start address of the initrd
 * @end:   physical end address of the initrd
 */
static void __early_init_dt_declare_initrd(unsigned long start,
					   unsigned long end)
{
	/*
	 * __va() is not yet available this early on some platforms. In that
	 * case, the platform uses phys_initrd_start/phys_initrd_size instead
	 * and does the VA conversion itself.
	 */
	if (!IS_ENABLED(CONFIG_ARM64) &&
	    !(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) {
		initrd_start = (unsigned long)__va(start);
		initrd_end = (unsigned long)__va(end);
		initrd_below_start_ok = 1;
	}
}
/**
 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
 * @node: reference to node containing initrd location ('chosen')
 *
 * Reads linux,initrd-start / linux,initrd-end and records the physical
 * range in phys_initrd_start/phys_initrd_size. Silently returns when
 * either property is missing or the range is inverted.
 */
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
	u64 start, end;
	int len;
	const __be32 *prop;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;

	pr_debug("Looking for initrd properties... ");

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
	if (!prop)
		return;
	/* len/4 = number of 32-bit cells; property may be 1 or 2 cells */
	start = of_read_number(prop, len/4);

	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len/4);
	if (start > end)
		return;

	__early_init_dt_declare_initrd(start, end);
	phys_initrd_start = start;
	phys_initrd_size = end - start;

	pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
}
/**
 * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
 * tree
 * @node: reference to node containing elfcorehdr location ('chosen')
 *
 * Fills elfcorehdr_addr/elfcorehdr_size from the linux,elfcorehdr
 * property; fdt_reserve_elfcorehdr() later reserves that region.
 */
static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
{
	const __be32 *prop;
	int len;

	if (!IS_ENABLED(CONFIG_CRASH_DUMP))
		return;

	pr_debug("Looking for elfcorehdr property... ");

	prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	/* Need at least one full (address, size) pair */
	if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
		 elfcorehdr_addr, elfcorehdr_size);
}
/* Offset of the /chosen node in initial_boot_params, cached by
 * early_init_dt_scan_chosen(); holds a negative libfdt error until then. */
static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;

/*
 * The main usage of linux,usable-memory-range is for crash dump kernel.
 * Originally, the number of usable-memory regions is one. Now there may
 * be two regions, low region and high region.
 * To make compatibility with existing user-space and older kdump, the low
 * region is always the last range of linux,usable-memory-range if exist.
 */
#define MAX_USABLE_RANGES		2
/**
 * early_init_dt_check_for_usable_mem_range - Decode usable memory range
 * location from flat tree
 *
 * Caps memblock to the first range of /chosen linux,usable-memory-range
 * and re-adds any additional ranges (used by kdump capture kernels).
 */
void __init early_init_dt_check_for_usable_mem_range(void)
{
	struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
	const __be32 *prop, *endp;
	int len, i;
	unsigned long node = chosen_node_offset;

	/* /chosen was never found by early_init_dt_scan_chosen() */
	if ((long)node < 0)
		return;

	pr_debug("Looking for usable-memory-range property... ");

	prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	/* Reject a property that isn't whole (address, size) pairs */
	if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
		return;

	endp = prop + (len / sizeof(__be32));
	for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
		rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);

		pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
			 i, &rgn[i].base, &rgn[i].size);
	}

	/* Cap to the first range, then add the remaining ones back */
	memblock_cap_memory_range(rgn[0].base, rgn[0].size);
	for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
		memblock_add(rgn[i].base, rgn[i].size);
}
#ifdef CONFIG_SERIAL_EARLYCON

/*
 * early_init_dt_scan_chosen_stdout - set up an earlycon from stdout-path
 *
 * Resolves /chosen stdout-path (or legacy linux,stdout-path), splits off
 * any ":options" suffix, and tries each compiled-in earlycon driver whose
 * compatible matches the referenced node.
 *
 * Return: 0 on success or when the stdout-path node is absent, -ENOENT if
 * there is no usable /chosen property, -ENODEV if no driver matched.
 */
int __init early_init_dt_scan_chosen_stdout(void)
{
	int offset;
	const char *p, *q, *options = NULL;
	int l;
	const struct earlycon_id *match;
	const void *fdt = initial_boot_params;
	int ret;

	offset = fdt_path_offset(fdt, "/chosen");
	if (offset < 0)
		offset = fdt_path_offset(fdt, "/chosen@0");
	if (offset < 0)
		return -ENOENT;

	p = fdt_getprop(fdt, offset, "stdout-path", &l);
	if (!p)
		p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
	if (!p || !l)
		return -ENOENT;

	/* Anything after ':' is device options (e.g. baud rate) */
	q = strchrnul(p, ':');
	if (*q != '\0')
		options = q + 1;
	l = q - p;

	/* Get the node specified by stdout-path */
	offset = fdt_path_offset_namelen(fdt, p, l);
	if (offset < 0) {
		pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
		return 0;
	}

	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
		if (!match->compatible[0])
			continue;

		if (fdt_node_check_compatible(fdt, offset, match->compatible))
			continue;

		/* -EALREADY means an earlier earlycon already claimed it */
		ret = of_setup_earlycon(match, offset, options);
		if (!ret || ret == -EALREADY)
			return 0;
	}
	return -ENODEV;
}
#endif
/*
 * early_init_dt_scan_root - fetch the top level address and size cells
 *
 * Initializes dt_root_addr_cells/dt_root_size_cells from the root node's
 * #address-cells/#size-cells, falling back to the spec defaults.
 */
int __init early_init_dt_scan_root(void)
{
	const __be32 *prop;
	const void *fdt = initial_boot_params;
	int node = fdt_path_offset(fdt, "/");

	if (node < 0)
		return -ENODEV;

	/* Spec defaults apply when the properties are absent */
	dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
	dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (prop)
		dt_root_size_cells = be32_to_cpup(prop);
	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (prop)
		dt_root_addr_cells = be32_to_cpup(prop);
	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);

	return 0;
}
  798. u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
  799. {
  800. const __be32 *p = *cellp;
  801. *cellp = p + s;
  802. return of_read_number(p, s);
  803. }
/*
 * early_init_dt_scan_memory - Look for and parse memory nodes
 *
 * Walks top-level nodes with device_type = "memory", registers each
 * (base, size) pair with the early allocator, and marks hotpluggable
 * ranges. Returns non-zero if at least one region was added.
 */
int __init early_init_dt_scan_memory(void)
{
	int node, found_memory = 0;
	const void *fdt = initial_boot_params;

	fdt_for_each_subnode(node, fdt, 0) {
		const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
		const __be32 *reg, *endp;
		int l;
		bool hotpluggable;

		/* We are scanning "memory" nodes only */
		if (type == NULL || strcmp(type, "memory") != 0)
			continue;

		if (!of_fdt_device_is_available(fdt, node))
			continue;

		/* kdump kernels may restrict memory via linux,usable-memory */
		reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
		if (reg == NULL)
			reg = of_get_flat_dt_prop(node, "reg", &l);
		if (reg == NULL)
			continue;

		endp = reg + (l / sizeof(__be32));
		hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);

		pr_debug("memory scan node %s, reg size %d,\n",
			 fdt_get_name(fdt, node, NULL), l);

		/* Consume one (address, size) pair per iteration */
		while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
			u64 base, size;

			base = dt_mem_next_cell(dt_root_addr_cells, &reg);
			size = dt_mem_next_cell(dt_root_size_cells, &reg);

			if (size == 0)
				continue;
			pr_debug(" - %llx, %llx\n", base, size);

			early_init_dt_add_memory_arch(base, size);

			found_memory = 1;

			if (!hotpluggable)
				continue;

			if (memblock_mark_hotplug(base, size))
				pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
					base, base + size);
		}
	}
	return found_memory;
}
/*
 * early_init_dt_scan_chosen - scan the /chosen node for boot parameters
 * @cmdline: buffer receiving the kernel command line (COMMAND_LINE_SIZE bytes)
 *
 * Picks up initrd location, elfcorehdr, the bootloader-supplied RNG seed
 * and the "bootargs" command line, then applies the CONFIG_CMDLINE policy.
 * Always returns 0; a missing /chosen node is not an error because the
 * built-in command line may still need to be applied.
 */
int __init early_init_dt_scan_chosen(char *cmdline)
{
	int l, node;
	const char *p;
	const void *rng_seed;
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		node = fdt_path_offset(fdt, "/chosen@0");
	if (node < 0)
		/* Handle the cmdline config options even if no /chosen node */
		goto handle_cmdline;

	chosen_node_offset = node;

	early_init_dt_check_for_initrd(node);
	early_init_dt_check_for_elfcorehdr(node);

	rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
	if (rng_seed && l > 0) {
		add_bootloader_randomness(rng_seed, l);

		/* try to clear seed so it won't be found. */
		fdt_nop_property(initial_boot_params, node, "rng-seed");

		/* update CRC check value, the blob was just modified */
		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
				fdt_totalsize(initial_boot_params));
	}

	/* Retrieve command line */
	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0)
		strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));

handle_cmdline:
	/*
	 * CONFIG_CMDLINE is meant to be a default in case nothing else
	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
	 * is set in which case we override whatever was found earlier.
	 */
#ifdef CONFIG_CMDLINE
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(cmdline, " ", COMMAND_LINE_SIZE);
	strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
	/* No arguments from boot loader, use kernel's cmdline */
	if (!((char *)cmdline)[0])
		strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */

	pr_debug("Command line is: %s\n", (char *)cmdline);

	return 0;
}
/*
 * Architectures may override these bounds; the defaults accept everything
 * from the physical address of PAGE_OFFSET up to the top of phys_addr_t.
 */
#ifndef MIN_MEMBLOCK_ADDR
#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
#endif
#ifndef MAX_MEMBLOCK_ADDR
#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
#endif
/*
 * early_init_dt_add_memory_arch - register a DT memory range with memblock
 * @base: physical start of the range
 * @size: length of the range in bytes
 *
 * Weak default implementation: page-aligns the range, clips it to the
 * [MIN_MEMBLOCK_ADDR, MAX_MEMBLOCK_ADDR] window and adds the remainder
 * to memblock. Ranges that vanish after alignment/clipping are dropped
 * with a warning.
 */
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
	const u64 phys_offset = MIN_MEMBLOCK_ADDR;

	/* Range too small to contain even one fully aligned page */
	if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	/* Round the start up to a page boundary, shrinking size to match */
	if (!PAGE_ALIGNED(base)) {
		size -= PAGE_SIZE - (base & ~PAGE_MASK);
		base = PAGE_ALIGN(base);
	}
	size &= PAGE_MASK;

	/* Entirely above the supported physical address space */
	if (base > MAX_MEMBLOCK_ADDR) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	/* Clip the tail that extends past MAX_MEMBLOCK_ADDR */
	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
			((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
		size = MAX_MEMBLOCK_ADDR - base + 1;
	}

	/* Entirely below the minimum usable physical address */
	if (base + size < phys_offset) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	/* Clip the head that lies below phys_offset */
	if (base < phys_offset) {
		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
			base, phys_offset);
		size -= phys_offset - base;
		base = phys_offset;
	}
	memblock_add(base, size);
}
  939. static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
  940. {
  941. void *ptr = memblock_alloc(size, align);
  942. if (!ptr)
  943. panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
  944. __func__, size, align);
  945. return ptr;
  946. }
  947. bool __init early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys)
  948. {
  949. if (!dt_virt)
  950. return false;
  951. /* check device tree validity */
  952. if (fdt_check_header(dt_virt))
  953. return false;
  954. /* Setup flat device-tree pointer */
  955. initial_boot_params = dt_virt;
  956. initial_boot_params_pa = dt_phys;
  957. of_fdt_crc32 = crc32_be(~0, initial_boot_params,
  958. fdt_totalsize(initial_boot_params));
  959. /* Initialize {size,address}-cells info */
  960. early_init_dt_scan_root();
  961. return true;
  962. }
  963. void __init early_init_dt_scan_nodes(void)
  964. {
  965. int rc;
  966. /* Retrieve various information from the /chosen node */
  967. rc = early_init_dt_scan_chosen(boot_command_line);
  968. if (rc)
  969. pr_warn("No chosen node found, continuing without\n");
  970. /* Setup memory, calling early_init_dt_add_memory_arch */
  971. early_init_dt_scan_memory();
  972. /* Handle linux,usable-memory-range property */
  973. early_init_dt_check_for_usable_mem_range();
  974. }
  975. bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys)
  976. {
  977. bool status;
  978. status = early_init_dt_verify(dt_virt, dt_phys);
  979. if (!status)
  980. return false;
  981. early_init_dt_scan_nodes();
  982. return true;
  983. }
  984. static void *__init copy_device_tree(void *fdt)
  985. {
  986. int size;
  987. void *dt;
  988. size = fdt_totalsize(fdt);
  989. dt = early_init_dt_alloc_memory_arch(size,
  990. roundup_pow_of_two(FDT_V17_SIZE));
  991. if (dt)
  992. memcpy(dt, fdt, size);
  993. return dt;
  994. }
/**
 * unflatten_device_tree - create tree of device_nodes from flat blob
 *
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used.
 */
void __init unflatten_device_tree(void)
{
	void *fdt = initial_boot_params;

	/* Save the statically-placed regions in the reserved_mem array */
	fdt_scan_reserved_mem_reg_nodes();

	/* Populate an empty root node when bootloader doesn't provide one */
	if (!fdt) {
		fdt = (void *) __dtb_empty_root_begin;
		/* fdt_totalsize() will be used for copy size */
		if (fdt_totalsize(fdt) >
		    __dtb_empty_root_end - __dtb_empty_root_begin) {
			pr_err("invalid size in dtb_empty_root\n");
			return;
		}
		/* Record the CRC before copying so the sysfs check matches */
		of_fdt_crc32 = crc32_be(~0, fdt, fdt_totalsize(fdt));
		fdt = copy_device_tree(fdt);
	}

	__unflatten_device_tree(fdt, NULL, &of_root,
				early_init_dt_alloc_memory_arch, false);

	/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
	of_alias_scan(early_init_dt_alloc_memory_arch);

	unittest_unflatten_overlay_base();
}
  1026. /**
  1027. * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
  1028. *
  1029. * Copies and unflattens the device-tree passed by the firmware, creating the
  1030. * tree of struct device_node. It also fills the "name" and "type"
  1031. * pointers of the nodes so the normal device-tree walking functions
  1032. * can be used. This should only be used when the FDT memory has not been
  1033. * reserved such is the case when the FDT is built-in to the kernel init
  1034. * section. If the FDT memory is reserved already then unflatten_device_tree
  1035. * should be used instead.
  1036. */
  1037. void __init unflatten_and_copy_device_tree(void)
  1038. {
  1039. if (initial_boot_params)
  1040. initial_boot_params = copy_device_tree(initial_boot_params);
  1041. unflatten_device_tree();
  1042. }
  1043. #ifdef CONFIG_SYSFS
/*
 * of_fdt_raw_read - sysfs read handler for /sys/firmware/fdt.
 *
 * Plain copy from the in-memory blob; off/count are presumably already
 * clamped by the sysfs core to the bin_attribute size set in
 * of_fdt_raw_init() — NOTE(review): confirm against the sysfs contract.
 */
static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	memcpy(buf, initial_boot_params + off, count);
	return count;
}
/*
 * of_fdt_raw_init - expose the raw FDT blob as /sys/firmware/fdt.
 *
 * Skipped silently when there is no blob, or when the blob's current CRC
 * no longer matches the value recorded at boot (i.e. it was modified
 * after the last of_fdt_crc32 update).
 */
static int __init of_fdt_raw_init(void)
{
	static struct bin_attribute of_fdt_raw_attr =
		__BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);

	if (!initial_boot_params)
		return 0;

	/* Do not publish a blob that changed since its CRC was recorded */
	if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
				     fdt_totalsize(initial_boot_params))) {
		pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
		return 0;
	}
	of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
	return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
}
late_initcall(of_fdt_raw_init);
  1066. #endif
  1067. #endif /* CONFIG_OF_EARLY_FLATTREE */