adi_64.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397
  1. /* adi_64.c: support for ADI (Application Data Integrity) feature on
  2. * sparc m7 and newer processors. This feature is also known as
  3. * SSM (Silicon Secured Memory).
  4. *
  5. * Copyright (C) 2016 Oracle and/or its affiliates. All rights reserved.
  6. * Author: Khalid Aziz (khalid.aziz@oracle.com)
  7. *
  8. * This work is licensed under the terms of the GNU GPL, version 2.
  9. */
  10. #include <linux/init.h>
  11. #include <linux/slab.h>
  12. #include <linux/mm_types.h>
  13. #include <asm/mdesc.h>
  14. #include <asm/adi_64.h>
  15. #include <asm/mmu_64.h>
  16. #include <asm/pgtable_64.h>
  17. /* Each page of storage for ADI tags can accommodate tags for 128
  18. * pages. When ADI enabled pages are being swapped out, it would be
  19. * prudent to allocate at least enough tag storage space to accommodate
  20. * SWAPFILE_CLUSTER number of pages. Allocate enough tag storage to
  21. * store tags for four SWAPFILE_CLUSTER pages to reduce need for
  22. * further allocations for same vma.
  23. */
  24. #define TAG_STORAGE_PAGES 8
  25. struct adi_config adi_state;
  26. EXPORT_SYMBOL(adi_state);
  27. /* mdesc_adi_init() : Parse machine description provided by the
  28. * hypervisor to detect ADI capabilities
  29. *
  30. * Hypervisor reports ADI capabilities of platform in "hwcap-list" property
  31. * for "cpu" node. If the platform supports ADI, "hwcap-list" property
  32. * contains the keyword "adp". If the platform supports ADI, "platform"
  33. * node will contain "adp-blksz", "adp-nbits" and "ue-on-adp" properties
  34. * to describe the ADI capabilities.
  35. */
  36. void __init mdesc_adi_init(void)
  37. {
  38. struct mdesc_handle *hp = mdesc_grab();
  39. const char *prop;
  40. u64 pn, *val;
  41. int len;
  42. if (!hp)
  43. goto adi_not_found;
  44. pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
  45. if (pn == MDESC_NODE_NULL)
  46. goto adi_not_found;
  47. prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
  48. if (!prop)
  49. goto adi_not_found;
  50. /*
  51. * Look for "adp" keyword in hwcap-list which would indicate
  52. * ADI support
  53. */
  54. adi_state.enabled = false;
  55. while (len) {
  56. int plen;
  57. if (!strcmp(prop, "adp")) {
  58. adi_state.enabled = true;
  59. break;
  60. }
  61. plen = strlen(prop) + 1;
  62. prop += plen;
  63. len -= plen;
  64. }
  65. if (!adi_state.enabled)
  66. goto adi_not_found;
  67. /* Find the ADI properties in "platform" node. If all ADI
  68. * properties are not found, ADI support is incomplete and
  69. * do not enable ADI in the kernel.
  70. */
  71. pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
  72. if (pn == MDESC_NODE_NULL)
  73. goto adi_not_found;
  74. val = (u64 *) mdesc_get_property(hp, pn, "adp-blksz", &len);
  75. if (!val)
  76. goto adi_not_found;
  77. adi_state.caps.blksz = *val;
  78. val = (u64 *) mdesc_get_property(hp, pn, "adp-nbits", &len);
  79. if (!val)
  80. goto adi_not_found;
  81. adi_state.caps.nbits = *val;
  82. val = (u64 *) mdesc_get_property(hp, pn, "ue-on-adp", &len);
  83. if (!val)
  84. goto adi_not_found;
  85. adi_state.caps.ue_on_adi = *val;
  86. /* Some of the code to support swapping ADI tags is written
  87. * assumption that two ADI tags can fit inside one byte. If
  88. * this assumption is broken by a future architecture change,
  89. * that code will have to be revisited. If that were to happen,
  90. * disable ADI support so we do not get unpredictable results
  91. * with programs trying to use ADI and their pages getting
  92. * swapped out
  93. */
  94. if (adi_state.caps.nbits > 4) {
  95. pr_warn("WARNING: ADI tag size >4 on this platform. Disabling AADI support\n");
  96. adi_state.enabled = false;
  97. }
  98. mdesc_release(hp);
  99. return;
  100. adi_not_found:
  101. adi_state.enabled = false;
  102. adi_state.caps.blksz = 0;
  103. adi_state.caps.nbits = 0;
  104. if (hp)
  105. mdesc_release(hp);
  106. }
  107. tag_storage_desc_t *find_tag_store(struct mm_struct *mm,
  108. struct vm_area_struct *vma,
  109. unsigned long addr)
  110. {
  111. tag_storage_desc_t *tag_desc = NULL;
  112. unsigned long i, max_desc, flags;
  113. /* Check if this vma already has tag storage descriptor
  114. * allocated for it.
  115. */
  116. max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
  117. if (mm->context.tag_store) {
  118. tag_desc = mm->context.tag_store;
  119. spin_lock_irqsave(&mm->context.tag_lock, flags);
  120. for (i = 0; i < max_desc; i++) {
  121. if ((addr >= tag_desc->start) &&
  122. ((addr + PAGE_SIZE - 1) <= tag_desc->end))
  123. break;
  124. tag_desc++;
  125. }
  126. spin_unlock_irqrestore(&mm->context.tag_lock, flags);
  127. /* If no matching entries were found, this must be a
  128. * freshly allocated page
  129. */
  130. if (i >= max_desc)
  131. tag_desc = NULL;
  132. }
  133. return tag_desc;
  134. }
/* Find or create tag storage for the page at @addr in this mm, so its
 * ADI tags can be saved at swap-out time. Returns a descriptor with
 * tag_users incremented, or NULL when no free descriptor slot or tag
 * memory is available. Allocations use GFP_NOWAIT because this runs on
 * the swap-out path. @vma is currently unused.
 */
tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm,
				    struct vm_area_struct *vma,
				    unsigned long addr)
{
	unsigned char *tags;
	unsigned long i, size, max_desc, flags;
	tag_storage_desc_t *tag_desc, *open_desc;
	unsigned long end_addr, hole_start, hole_end;

	/* The descriptor table is one page; derive the slot count */
	max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
	open_desc = NULL;
	hole_start = 0;
	hole_end = ULONG_MAX;
	end_addr = addr + PAGE_SIZE - 1;

	/* Check if this vma already has tag storage descriptor
	 * allocated for it.
	 */
	spin_lock_irqsave(&mm->context.tag_lock, flags);
	if (mm->context.tag_store) {
		tag_desc = mm->context.tag_store;

		/* Look for a matching entry for this address. While doing
		 * that, look for the first open slot as well and find
		 * the hole in already allocated range where this request
		 * will fit in.
		 */
		for (i = 0; i < max_desc; i++) {
			if (tag_desc->tag_users == 0) {
				/* Unused slot; remember the first one */
				if (open_desc == NULL)
					open_desc = tag_desc;
			} else {
				/* Existing descriptor fully covers the
				 * page: just take another reference.
				 */
				if ((addr >= tag_desc->start) &&
				    (tag_desc->end >= (addr + PAGE_SIZE - 1))) {
					tag_desc->tag_users++;
					goto out;
				}
			}
			/* Narrow [hole_start, hole_end] to the gap between
			 * the occupied ranges surrounding this address.
			 */
			if ((tag_desc->start > end_addr) &&
			    (tag_desc->start < hole_end))
				hole_end = tag_desc->start;
			if ((tag_desc->end < addr) &&
			    (tag_desc->end > hole_start))
				hole_start = tag_desc->end;
			tag_desc++;
		}
	} else {
		/* First use: allocate and zero the one-page descriptor
		 * table for this mm.
		 */
		size = sizeof(tag_storage_desc_t)*max_desc;
		mm->context.tag_store = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
		if (mm->context.tag_store == NULL) {
			tag_desc = NULL;
			goto out;
		}
		tag_desc = mm->context.tag_store;
		for (i = 0; i < max_desc; i++, tag_desc++)
			tag_desc->tag_users = 0;
		open_desc = mm->context.tag_store;
		i = 0;
	}

	/* Check if we ran out of tag storage descriptors */
	if (open_desc == NULL) {
		tag_desc = NULL;
		goto out;
	}

	/* Mark this tag descriptor slot in use and then initialize it */
	tag_desc = open_desc;
	tag_desc->tag_users = 1;

	/* Tag storage has not been allocated for this vma and space
	 * is available in tag storage descriptor. Since this page is
	 * being swapped out, there is high probability subsequent pages
	 * in the VMA will be swapped out as well. Allocate pages to
	 * store tags for as many pages in this vma as possible but not
	 * more than TAG_STORAGE_PAGES. Each byte in tag space holds
	 * two ADI tags since each ADI tag is 4 bits. Each ADI tag
	 * covers adi_blksize() worth of addresses. Check if the hole is
	 * big enough to accommodate full address range for using
	 * TAG_STORAGE_PAGES number of tag pages.
	 */
	size = TAG_STORAGE_PAGES * PAGE_SIZE;
	end_addr = addr + (size*2*adi_blksize()) - 1;

	/* Check for overflow. If overflow occurs, allocate only one page */
	if (end_addr < addr) {
		size = PAGE_SIZE;
		end_addr = addr + (size*2*adi_blksize()) - 1;
		/* If overflow happens with the minimum tag storage
		 * allocation as well, adjust ending address for this
		 * tag storage.
		 */
		if (end_addr < addr)
			end_addr = ULONG_MAX;
	}
	if (hole_end < end_addr) {
		/* Available hole is too small on the upper end of
		 * address. Can we expand the range towards the lower
		 * address and maximize use of this slot?
		 */
		unsigned long tmp_addr;

		end_addr = hole_end - 1;
		tmp_addr = end_addr - (size*2*adi_blksize()) + 1;
		/* Check for underflow. If underflow occurs, allocate
		 * only one page for storing ADI tags
		 */
		if (tmp_addr > addr) {
			size = PAGE_SIZE;
			/* NOTE(review): "- 1" here vs "+ 1" in the same
			 * computation above looks asymmetric — confirm
			 * intended off-by-one against upstream.
			 */
			tmp_addr = end_addr - (size*2*adi_blksize()) - 1;
			/* If underflow happens with the minimum tag storage
			 * allocation as well, adjust starting address for
			 * this tag storage.
			 */
			if (tmp_addr > addr)
				tmp_addr = 0;
		}
		if (tmp_addr < hole_start) {
			/* Available hole is restricted on lower address
			 * end as well
			 */
			tmp_addr = hole_start + 1;
		}
		addr = tmp_addr;
		/* Recompute the tag-space size for the clamped address
		 * range, rounded up to whole pages: one tag byte covers
		 * 2*adi_blksize() bytes of address space.
		 */
		size = (end_addr + 1 - addr)/(2*adi_blksize());
		size = (size + (PAGE_SIZE-adi_blksize()))/PAGE_SIZE;
		size = size * PAGE_SIZE;
	}
	tags = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
	if (tags == NULL) {
		/* Release the slot we claimed above */
		tag_desc->tag_users = 0;
		tag_desc = NULL;
		goto out;
	}
	tag_desc->start = addr;
	tag_desc->tags = tags;
	tag_desc->end = end_addr;

out:
	spin_unlock_irqrestore(&mm->context.tag_lock, flags);
	return tag_desc;
}
  268. void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm)
  269. {
  270. unsigned long flags;
  271. unsigned char *tags = NULL;
  272. spin_lock_irqsave(&mm->context.tag_lock, flags);
  273. tag_desc->tag_users--;
  274. if (tag_desc->tag_users == 0) {
  275. tag_desc->start = tag_desc->end = 0;
  276. /* Do not free up the tag storage space allocated
  277. * by the first descriptor. This is persistent
  278. * emergency tag storage space for the task.
  279. */
  280. if (tag_desc != mm->context.tag_store) {
  281. tags = tag_desc->tags;
  282. tag_desc->tags = NULL;
  283. }
  284. }
  285. spin_unlock_irqrestore(&mm->context.tag_lock, flags);
  286. kfree(tags);
  287. }
  288. #define tag_start(addr, tag_desc) \
  289. ((tag_desc)->tags + ((addr - (tag_desc)->start)/(2*adi_blksize())))
  290. /* Retrieve any saved ADI tags for the page being swapped back in and
  291. * restore these tags to the newly allocated physical page.
  292. */
/* Restore saved ADI version tags to the physical page mapped by @pte
 * at virtual @addr, then release the tag storage reference. No-op if
 * no tags were saved for this page.
 */
void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pte_t pte)
{
	unsigned char *tag;
	tag_storage_desc_t *tag_desc;
	unsigned long paddr, tmp, version1, version2;

	/* Check if the swapped out page has an ADI version
	 * saved. If yes, restore version tag to the newly
	 * allocated page.
	 */
	tag_desc = find_tag_store(mm, vma, addr);
	if (tag_desc == NULL)
		return;

	tag = tag_start(addr, tag_desc);
	paddr = pte_val(pte) & _PAGE_PADDR_4V;
	/* Each stored byte holds two 4-bit tags (high nibble first), so
	 * each iteration writes tags for two ADI blocks: tmp advances
	 * once inside the body and once more in the loop increment.
	 */
	for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
		version1 = (*tag) >> 4;
		version2 = (*tag) & 0x0f;
		/* Clear the consumed tag byte in the store */
		*tag++ = 0;
		/* Write version tag for this block via ASI_MCD_REAL */
		asm volatile("stxa %0, [%1] %2\n\t"
			:
			: "r" (version1), "r" (tmp),
			  "i" (ASI_MCD_REAL));
		tmp += adi_blksize();
		asm volatile("stxa %0, [%1] %2\n\t"
			:
			: "r" (version2), "r" (tmp),
			  "i" (ASI_MCD_REAL));
	}
	/* Ensure all tag stores complete before the page is used */
	asm volatile("membar #Sync\n\t");

	/* Check and mark this tag space for release later if
	 * the swapped in page was the last user of tag space
	 */
	del_tag_store(tag_desc, mm);
}
  328. /* A page is about to be swapped out. Save any ADI tags associated with
  329. * this physical page so they can be restored later when the page is swapped
  330. * back in.
  331. */
/* Save the ADI version tags of the physical page mapped by @oldpte at
 * virtual @addr into this mm's tag storage, allocating storage as
 * needed. Returns 0 on success, -1 if tag storage could not be
 * obtained.
 */
int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long addr, pte_t oldpte)
{
	unsigned char *tag;
	tag_storage_desc_t *tag_desc;
	unsigned long version1, version2, paddr, tmp;

	tag_desc = alloc_tag_store(mm, vma, addr);
	if (tag_desc == NULL)
		return -1;

	tag = tag_start(addr, tag_desc);
	paddr = pte_val(oldpte) & _PAGE_PADDR_4V;
	/* Pack two 4-bit tags per byte (first block in the high nibble):
	 * tmp advances once inside the body and once more in the loop
	 * increment, covering two ADI blocks per stored byte.
	 */
	for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
		/* Read version tag for this block via ASI_MCD_REAL */
		asm volatile("ldxa [%1] %2, %0\n\t"
			: "=r" (version1)
			: "r" (tmp), "i" (ASI_MCD_REAL));
		tmp += adi_blksize();
		asm volatile("ldxa [%1] %2, %0\n\t"
			: "=r" (version2)
			: "r" (tmp), "i" (ASI_MCD_REAL));
		*tag = (version1 << 4) | version2;
		tag++;
	}

	return 0;
}