vmlinux-xip.lds.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2020 Vitaly Wool, Konsulko AB
 */

#include <asm/pgtable.h>
#define LOAD_OFFSET KERNEL_LINK_ADDR

/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA
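/*
 * Defining RO_AFTER_INIT_DATA as empty keeps __ro_after_init data out of the
 * generic RO_DATA output: in an XIP image .rodata lives in ROM and can never
 * be written, so that data is collected into the RAM-resident
 * .data.ro_after_init section further down instead.
 */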

#include <asm/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/set_memory.h>
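
/*
 * Rough layout of the XIP image (descriptive summary of the section
 * definitions below):
 *
 *   _xiprom .. _exiprom   text and read-only data, executed in place from ROM
 *   __data_loc ..         ROM (load) copy of the writable data image
 *   _sdata .. _edata      writable data as linked, copied out of ROM into RAM
 *                         by the early boot code before it is used
 */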

OUTPUT_ARCH(riscv)
ENTRY(_start)

jiffies = jiffies_64;

SECTIONS
{
        /* Beginning of code and text segment */
        . = LOAD_OFFSET;
        _xiprom = .;
        _start = .;
        HEAD_TEXT_SECTION
        INIT_TEXT_SECTION(PAGE_SIZE)
        /* we have to discard exit text and such at runtime, not link time */
        __exittext_begin = .;
        .exit.text :
        {
                EXIT_TEXT
        }
        __exittext_end = .;

        .text : {
                _text = .;
                _stext = .;
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                ENTRY_TEXT
                IRQENTRY_TEXT
                SOFTIRQENTRY_TEXT
                _etext = .;
        }
        RO_DATA(L1_CACHE_BYTES)
        .srodata : {
                *(.srodata*)
        }
        .init.rodata : {
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                INIT_RAM_FS
        }
        _exiprom = .;                   /* End of XIP ROM area */

        /*
         * From this point, stuff is considered writable and will be copied to RAM
         */
        __data_loc = ALIGN(PAGE_SIZE);  /* location in file */
        . = ALIGN(SECTION_ALIGN);       /* location in memory */

#undef LOAD_OFFSET
#define LOAD_OFFSET (KERNEL_LINK_ADDR + _sdata - __data_loc)
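
/*
 * Redefining LOAD_OFFSET makes the AT(... - LOAD_OFFSET) directives used from
 * here on place the writable sections' load addresses at the page-aligned
 * file location marked by __data_loc, directly behind the read-only XIP
 * image, while their link addresses continue from the SECTION_ALIGN boundary
 * above. This load copy is what ends up stored in ROM.
 */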

        _sdata = .;                     /* Start of data section */
        _data = .;
        RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
        _edata = .;
        __start_ro_after_init = .;
        .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
                *(.data..ro_after_init)
        }
        __end_ro_after_init = .;

        . = ALIGN(PAGE_SIZE);
        __init_begin = .;
        .init.data : {
                INIT_DATA
        }
        .exit.data : {
                EXIT_DATA
        }
        . = ALIGN(8);
        __soc_early_init_table : {
                __soc_early_init_table_start = .;
                KEEP(*(__soc_early_init_table))
                __soc_early_init_table_end = .;
        }
        __soc_builtin_dtb_table : {
                __soc_builtin_dtb_table_start = .;
                KEEP(*(__soc_builtin_dtb_table))
                __soc_builtin_dtb_table_end = .;
        }
        __init_end = .;

        . = ALIGN(16);
        .xip.traps : {
                __xip_traps_start = .;
                *(.xip.traps)
                __xip_traps_end = .;
        }

        . = ALIGN(PAGE_SIZE);
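        /*
         * __global_pointer$ is biased by 0x800 because gp-relative accesses
         * use a signed 12-bit offset (+/-2 KiB); the bias lets gp reach the
         * whole 4 KiB window starting at the beginning of .sdata.
         */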
        .sdata : {
                __global_pointer$ = . + 0x800;
                *(.sdata*)
                *(.sbss*)
        }
        BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)

        PERCPU_SECTION(L1_CACHE_BYTES)

        .rel.dyn : AT(ADDR(.rel.dyn) - LOAD_OFFSET) {
                *(.rel.dyn*)
        }

        /*
         * End of copied data. We need a dummy section to get its LMA.
         * Also located before final ALIGN() as trailing padding is not stored
         * in the resulting binary file and useless to copy.
         */
        .data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
        _edata_loc = LOADADDR(.data.endmark);
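        /*
         * _edata_loc is the load address just past the last byte that has to
         * be copied out of ROM; together with __data_loc it bounds the data
         * image in the binary, which the boot code is expected to copy into
         * RAM at _sdata before any writable data is touched.
         */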

        . = ALIGN(PAGE_SIZE);
        _end = .;

        STABS_DEBUG
        DWARF_DEBUG

        DISCARDS
}