ttm_memory.c

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

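/*
 * A ttm_mem_zone tracks how much memory TTM has accounted against one
 * region of system memory (kernel, highmem or dma32).  All limits are
 * kept in bytes; only the sysfs interface below converts to kiB.
 */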
struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;	/* total size of the zone, in bytes */
	uint64_t emer_mem;	/* hard limit, CAP_SYS_ADMIN callers only */
	uint64_t max_mem;	/* soft limit for ordinary allocations */
	uint64_t swap_limit;	/* usage above this kicks the swap worker */
	uint64_t used_mem;	/* memory currently accounted to this zone */
};

static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};

static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	/* Values are written in kiB; the internal unit is bytes. */
	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

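/*
 * The global "lower_mem_limit" attribute is kept in pages internally
 * but read and written in kiB through sysfs; see the shifts by
 * (PAGE_SHIFT - 10) in the show/store callbacks below.
 */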
static struct attribute ttm_mem_global_lower_mem_limit = {
	.name = "lower_mem_limit",
	.mode = S_IRUGO | S_IWUSR
};

static ssize_t ttm_mem_global_show(struct kobject *kobj,
				   struct attribute *attr,
				   char *buffer)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);
	uint64_t val = 0;

	spin_lock(&glob->lock);
	val = glob->lower_mem_limit;
	spin_unlock(&glob->lock);
	/* convert from number of pages to kiB */
	val <<= (PAGE_SHIFT - 10);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val);
}

static ssize_t ttm_mem_global_store(struct kobject *kobj,
				    struct attribute *attr,
				    const char *buffer,
				    size_t size)
{
	int chars;
	uint64_t val64;
	unsigned long val;
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	/* convert from kiB to number of pages */
	val64 >>= (PAGE_SHIFT - 10);

	spin_lock(&glob->lock);
	glob->lower_mem_limit = val64;
	spin_unlock(&glob->lock);

	return size;
}

static struct attribute *ttm_mem_global_attrs[] = {
	&ttm_mem_global_lower_mem_limit,
	NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
	.show = &ttm_mem_global_show,
	.store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
	.sysfs_ops = &ttm_mem_global_ops,
	.default_attrs = ttm_mem_global_attrs,
};

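/*
 * Check whether any zone's usage is above the target it must stay
 * below.  The target depends on the caller: the swap worker uses the
 * zone's swap_limit, CAP_SYS_ADMIN callers emer_mem, everyone else
 * max_mem.  If @extra alone already exceeds a zone's target, the
 * target is clamped to zero so that any usage at all counts as above.
 */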
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/*
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	spin_lock(&glob->lock);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		spin_unlock(&glob->lock);
		ret = ttm_bo_swapout(glob->bo_glob, ctx);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			break;
	}

	spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
		container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}

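/*
 * Default per-zone limits, derived from the zone size "mem": max_mem
 * is mem/2, emer_mem is 3*mem/4 and swap_limit is 3*mem/8 (max_mem
 * minus mem/8).  All of them can be adjusted later through sysfs.
 */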
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
	int ret;

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/*
	 * No special dma32 zone needed.
	 */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4 GiB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

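/**
 * ttm_mem_global_init - initialize TTM memory accounting.
 * @glob: accounting object to initialize.
 *
 * Sets up the swap workqueue, the sysfs interface and one accounting
 * zone per relevant memory region (kernel, plus highmem or dma32),
 * then hands the page allocator pools their size limits.  Returns 0
 * on success or a negative error code.
 */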
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(),
		"memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	si_meminfo(&si);

	/* Default to 0, i.e. keep the original OOM behavior. */
	glob->lower_mem_limit = 0;

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);

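/**
 * ttm_mem_global_release - tear down TTM memory accounting.
 * @glob: accounting object to release.
 *
 * The page allocators are shut down first so that no new shrink work
 * can be queued while the swap workqueue is flushed and destroyed;
 * the zone kobjects are then deleted, printing their final usage.
 */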
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

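/*
 * Subtract @amount from the accounted usage of @single_zone, or of
 * every zone when @single_zone is NULL.  This mirrors
 * ttm_mem_global_reserve(), which charges an allocation against all
 * zones unless a single one is specified.
 */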
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

/*
 * Check whether the remaining free memory would drop below the lower
 * memory limit if @num_pages more pages were allocated.
 *
 * "Free" here is free swap space plus available system memory, so a
 * shortage on one side can be covered by the other: as long as the sum
 * stays at or above glob->lower_mem_limit, TTM allocations are
 * allowed.  Callers with TTM_OPT_FLAG_FORCE_ALLOC set bypass the check.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
			   uint64_t num_pages,
			   struct ttm_operation_ctx *ctx)
{
	int64_t available;

	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
		return false;

	available = get_nr_swap_pages() + si_mem_available();
	available -= num_pages;
	/*
	 * "available" is signed; check it separately so that a negative
	 * value is not promoted to a huge unsigned one when compared
	 * against the unsigned limit below.
	 */
	if (available < 0 || available < glob->lower_mem_limit)
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_check_under_lowerlimit);

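/*
 * Account @amount against all zones, or against @single_zone only.
 * Fails with -ENOMEM when a zone's current usage is already above its
 * applicable limit: emer_mem for CAP_SYS_ADMIN callers, max_mem for
 * everyone else.  When @reserve is true and no zone is over its limit,
 * @amount is added to the usage counters before the lock is dropped.
 */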
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

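/*
 * Reserve @memory, retrying up to TTM_MEMORY_ALLOC_RETRIES times and
 * asking ttm_shrink() to swap buffers out between attempts.  The
 * shrink target is the requested size plus 25% and 16 bytes of
 * headroom, so that a successful shrink leaves room for the allocation
 * itself.
 */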
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */
	return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

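/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file; my_alloc_backing() is an assumed helper): charge an object's
 * size to the accounting before backing it, and release the charge
 * again on failure or on free.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true,
 *					 .no_wait_gpu = false };
 *	int ret;
 *
 *	ret = ttm_mem_global_alloc(glob, size, &ctx);
 *	if (ret)
 *		return ret;
 *	if (!my_alloc_backing(size)) {
 *		ttm_mem_global_free(glob, size);
 *		return -ENOMEM;
 *	}
 */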
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	/* Pages beyond the dma32 range count against the kernel zone only. */
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}

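/**
 * ttm_round_pot - round a size up for accounting purposes.
 * @size: size in bytes.
 *
 * Power-of-two sizes are returned unchanged.  Other sizes up to
 * PAGE_SIZE are rounded up to the next power of two (minimum 4), and
 * larger sizes to a whole number of pages.  For example,
 * ttm_round_pot(100) returns 128, and with 4 KiB pages
 * ttm_round_pot(PAGE_SIZE + 1) returns 2 * PAGE_SIZE.
 */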
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
}
EXPORT_SYMBOL(ttm_round_pot);

uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
	return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);