etnaviv_drv.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_DRV_H__
#define __ETNAVIV_DRV_H__

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/mm_types.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/etnaviv_drm.h>
#include <drm/gpu_scheduler.h>

struct etnaviv_cmdbuf;
struct etnaviv_gpu;
struct etnaviv_mmu;
struct etnaviv_gem_object;
struct etnaviv_gem_submit;

struct etnaviv_file_private {
	/*
	 * When per-context address spaces are supported we'd keep track of
	 * the context's page-tables here.
	 */
	struct drm_sched_entity		sched_entity[ETNA_MAX_PIPES];
};

struct etnaviv_drm_private {
	int num_gpus;
	struct device_dma_parameters dma_parms;
	struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];

	/* list of GEM objects: */
	struct mutex gem_lock;
	struct list_head gem_list;
};
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file);

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma);
struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
	struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
void *etnaviv_gem_vmap(struct drm_gem_object *obj);
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout);
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle);
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	unsigned int event, struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
	u32 *stream, unsigned int size,
	struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size);

#ifdef CONFIG_DEBUG_FS
void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m);
#endif
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)

/*
 * Return the storage size of a structure with a variable length array.
 * The array is nelem elements of elem_size, where the base structure
 * is defined by base. If the size overflows size_t, return zero.
 */
static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
{
	if (elem_size && nelem > (SIZE_MAX - base) / elem_size)
		return 0;
	return base + nelem * elem_size;
}
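
/*
 * Usage sketch (illustrative, not part of the original header): size_vstruct()
 * computes an overflow-safe allocation size for a structure that ends in a
 * variable length array. The struct and caller below are hypothetical:
 *
 *	struct example {
 *		unsigned int nr_bos;
 *		u32 bo_handles[];
 *	};
 *
 *	size_t sz = size_vstruct(nr_bos, sizeof(u32), sizeof(struct example));
 *	struct example *e = sz ? kzalloc(sz, GFP_KERNEL) : NULL;
 *
 * A return value of 0 signals that base + nelem * elem_size would overflow
 * size_t, so the caller skips the allocation.
 */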
/* returns true if fence a comes after fence b */
static inline bool fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}

static inline bool fence_after_eq(u32 a, u32 b)
{
	return (s32)(a - b) >= 0;
}
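
/*
 * Worked example (illustrative, not part of the original header): the signed
 * interpretation of the unsigned subtraction makes these comparisons robust
 * against 32-bit fence counter wraparound. With a = 0x00000002 and
 * b = 0xfffffffd, a - b wraps to 5, so fence_after(a, b) is true even though
 * a < b numerically; conversely fence_after(b, a) sees (s32)0xfffffffb = -5
 * and returns false.
 */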
/*
 * Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies.
 * We need to calculate the timeout in terms of number of jiffies
 * between the specified timeout and the current CLOCK_MONOTONIC time.
 */
static inline unsigned long etnaviv_timeout_to_jiffies(
	const struct timespec *timeout)
{
	struct timespec64 ts, to;

	to = timespec_to_timespec64(*timeout);

	ktime_get_ts64(&ts);

	/* timeouts before "now" have already expired */
	if (timespec64_compare(&to, &ts) <= 0)
		return 0;

	ts = timespec64_sub(to, ts);

	return timespec64_to_jiffies(&ts);
}
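
/*
 * Worked example (illustrative, not part of the original header): if the
 * current CLOCK_MONOTONIC time is 100.0s and userspace passed an absolute
 * timeout of 100.5s, the difference is 0.5s, which timespec64_to_jiffies()
 * converts to roughly 500 jiffies at CONFIG_HZ=1000 (or 125 at CONFIG_HZ=250).
 * An absolute timeout of 99.0s lies in the past and yields 0, i.e. do not wait.
 */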

#endif /* __ETNAVIV_DRV_H__ */