// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_SLEEP	((__force xfs_km_flags_t)0x0001u)
#define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));

	if (flags & KM_NOSLEEP) {
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		if (flags & KM_NOFS)
			lflags &= ~__GFP_FS;
	}

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations. We can override this behavior by using
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible, but to fail rather than retry forever, for
	 * all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return lflags;
}

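/*
 * As an illustration (not part of the original header), the conversion
 * above maps flag combinations as follows:
 *
 *	kmem_flags_convert(KM_NOFS | KM_MAYFAIL)
 *	    == (GFP_KERNEL & ~__GFP_FS) | __GFP_NOWARN | __GFP_RETRY_MAYFAIL
 *	kmem_flags_convert(KM_NOSLEEP | KM_ZERO)
 *	    == GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO
 *
 * Note that KM_NOSLEEP selects the atomic path, where KM_NOFS has no
 * effect; GFP_ATOMIC does not include __GFP_FS to begin with.
 */
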
extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc_large(size, flags | KM_ZERO);
}

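/*
 * Illustrative usage (not part of the original header); "struct xfs_foo"
 * is a made-up type. A caller in transaction context would typically pass
 * KM_NOFS to keep the allocator from recursing into the filesystem, and
 * only a KM_MAYFAIL (or KM_NOSLEEP) allocation can return NULL:
 *
 *	struct xfs_foo	*foo;
 *
 *	foo = kmem_zalloc(sizeof(*foo), KM_NOFS | KM_MAYFAIL);
 *	if (!foo)
 *		return -ENOMEM;
 *	...
 *	kmem_free(foo);
 */
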
/*
 * Zone interfaces
 */
#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD
#define KM_ZONE_ACCOUNT	SLAB_ACCOUNT

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	return kmem_cache_create(zone_name, size, 0, 0, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
		     void (*construct)(void *))
{
	return kmem_cache_create(zone_name, size, 0, flags, construct);
}

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	kmem_cache_destroy(zone);
}

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}

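/*
 * Illustrative zone lifecycle (not part of the original header);
 * "xfs_foo_zone" and "struct xfs_foo" are made-up names. A zone is
 * created once at module init, objects are allocated and freed from
 * it, and the zone is destroyed at teardown:
 *
 *	kmem_zone_t	*xfs_foo_zone;
 *	struct xfs_foo	*fp;
 *
 *	xfs_foo_zone = kmem_zone_init(sizeof(struct xfs_foo), "xfs_foo");
 *	...
 *	fp = kmem_zone_zalloc(xfs_foo_zone, KM_SLEEP);
 *	...
 *	kmem_zone_free(xfs_foo_zone, fp);
 *	kmem_zone_destroy(xfs_foo_zone);
 */
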
#endif	/* __XFS_SUPPORT_KMEM_H__ */