vkms_gem.c

// SPDX-License-Identifier: GPL-2.0+

#include <linux/shmem_fs.h>

#include "vkms_drv.h"
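
/*
 * Allocate a vkms_gem_object and initialize the embedded GEM object with a
 * size rounded up to a whole number of pages. Returns the new object or an
 * ERR_PTR() on failure.
 */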
static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
						 u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	size = roundup(size, PAGE_SIZE);
	ret = drm_gem_object_init(dev, &obj->gem, size);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}
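
/*
 * Release everything the object owns: the page array (if any), the
 * pages_lock mutex and the GEM core state, then free the object itself.
 */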
void vkms_gem_free_object(struct drm_gem_object *obj)
{
	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
						   gem);

	kvfree(gem->pages);
	mutex_destroy(&gem->pages_lock);
	drm_gem_object_release(obj);
	kfree(gem);
}
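
/*
 * Page-fault handler for mmap'ed vkms objects. If the object's page array is
 * populated, the pinned page is handed out directly; otherwise the page is
 * read in from the shmem backing store and any shmem error is translated
 * into a VM_FAULT_* code.
 */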
int vkms_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vkms_gem_object *obj = vma->vm_private_data;
	unsigned long vaddr = vmf->address;
	pgoff_t page_offset;
	loff_t num_pages;
	int ret;

	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);

	/* Valid page offsets run from 0 to num_pages - 1. */
	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	ret = -ENOENT;
	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		/* A page array is set up: hand out the pinned page directly. */
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;
		struct address_space *mapping;

		/* Otherwise fault the page in from the shmem backing store. */
		mapping = file_inode(obj->gem.filp)->i_mapping;
		page = shmem_read_mapping_page(mapping, page_offset);

		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else {
			/* Map the shmem error onto a VM fault code. */
			switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
			}
		}
	}

	return ret;
}
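
/*
 * Create a vkms GEM object of the given size and a userspace handle for it
 * in @file. Returns the GEM object on success or an ERR_PTR() on failure.
 */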
static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      u32 *handle,
					      u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	if (!file || !dev || !handle)
		return ERR_PTR(-EINVAL);

	obj = __vkms_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->gem, handle);
	if (ret) {
		/* Drop the creation reference so the object is freed. */
		drm_gem_object_put_unlocked(&obj->gem);
		return ERR_PTR(ret);
	}

	return &obj->gem;
}
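
/*
 * DRM_IOCTL_MODE_CREATE_DUMB implementation: computes the pitch and buffer
 * size from the requested dimensions and bpp, creates a backing GEM object
 * and returns its handle, size and pitch to userspace.
 */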
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_obj;
	u64 pitch, size;

	if (!args || !dev || !file)
		return -EINVAL;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = pitch * args->height;

	if (!size)
		return -EINVAL;

	gem_obj = vkms_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_obj))
		return PTR_ERR(gem_obj);

	args->size = gem_obj->size;
	args->pitch = pitch;

	/* Drop the local reference; the handle keeps the object alive. */
	drm_gem_object_put_unlocked(gem_obj);

	DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

	return 0;
}
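
/*
 * DRM_IOCTL_MODE_MAP_DUMB implementation: looks up the object for @handle
 * and returns the fake offset userspace passes to mmap() to map the buffer.
 */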
int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
		  u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->filp) {
		ret = -EINVAL;
		goto unref;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
	drm_gem_object_put_unlocked(obj);

	return ret;
}