context.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, NVIDIA Corporation.
 */

#include <linux/device.h>
#include <linux/kref.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pid.h>
#include <linux/slab.h>

#include "context.h"
#include "dev.h"

static void host1x_memory_context_release(struct device *dev)
{
	/* context device is freed in host1x_memory_context_list_free() */
}

int host1x_memory_context_list_init(struct host1x *host1x)
{
	struct host1x_memory_context_list *cdl = &host1x->context_list;
	struct device_node *node = host1x->dev->of_node;
	struct host1x_memory_context *ctx;
	unsigned int i;
	int err;

	cdl->devs = NULL;
	cdl->len = 0;
	mutex_init(&cdl->lock);

	err = of_property_count_u32_elems(node, "iommu-map");
	if (err < 0)
		return 0;

	/* each "iommu-map" entry is four u32 cells: rid-base, iommu phandle, iommu-base, length */
	cdl->len = err / 4;
	cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
	if (!cdl->devs)
		return -ENOMEM;

	for (i = 0; i < cdl->len; i++) {
		ctx = &cdl->devs[i];
		ctx->host = host1x;

		device_initialize(&ctx->dev);

		/*
		 * Due to an issue with T194 NVENC, only 38 bits can be used.
		 * Anyway, 256GiB of IOVA ought to be enough for anyone.
		 */
		ctx->dma_mask = DMA_BIT_MASK(38);
		ctx->dev.dma_mask = &ctx->dma_mask;
		ctx->dev.coherent_dma_mask = ctx->dma_mask;
		dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
		ctx->dev.bus = &host1x_context_device_bus_type;
		ctx->dev.parent = host1x->dev;
		ctx->dev.release = host1x_memory_context_release;

		ctx->dev.dma_parms = &ctx->dma_parms;
		dma_set_max_seg_size(&ctx->dev, UINT_MAX);

		err = device_add(&ctx->dev);
		if (err) {
			dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
			put_device(&ctx->dev);
			goto unreg_devices;
		}

		err = of_dma_configure_id(&ctx->dev, node, true, &i);
		if (err) {
			dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
				i, err);
			device_unregister(&ctx->dev);
			goto unreg_devices;
		}

		if (!tegra_dev_iommu_get_stream_id(&ctx->dev, &ctx->stream_id) ||
		    !device_iommu_mapped(&ctx->dev)) {
			dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
			device_unregister(&ctx->dev);

			/*
			 * This means that if IOMMU is disabled but context devices
			 * are defined in the device tree, Host1x will fail to probe.
			 * That's probably OK in this time and age.
			 */
			err = -EINVAL;
			goto unreg_devices;
		}
	}

	return 0;

unreg_devices:
	while (i--)
		device_unregister(&cdl->devs[i].dev);

	kfree(cdl->devs);
	cdl->devs = NULL;
	cdl->len = 0;

	return err;
}
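
/*
 * Illustrative sketch (not part of this file): a minimal example of how a
 * host1x probe path might drive the two list helpers above. The wrapper
 * function name and the surrounding error handling are hypothetical; the
 * real call sites live elsewhere in the host1x driver. Guarded out so it
 * is never built.
 */
#if 0
static int example_init_context_list(struct host1x *host)
{
	int err;

	/*
	 * Creates one context device per "iommu-map" entry. Returning 0
	 * with an empty list simply means context isolation is unavailable.
	 */
	err = host1x_memory_context_list_init(host);
	if (err)
		return err;

	/* ... set up channels, syncpoints, and the rest of probe ... */

	/* On remove (or a later probe failure), tear the list down again. */
	host1x_memory_context_list_free(&host->context_list);

	return 0;
}
#endif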

void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
{
	unsigned int i;

	for (i = 0; i < cdl->len; i++)
		device_unregister(&cdl->devs[i].dev);

	kfree(cdl->devs);
	cdl->len = 0;
}

struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							   struct device *dev,
							   struct pid *pid)
{
	struct host1x_memory_context_list *cdl = &host1x->context_list;
	struct host1x_memory_context *free = NULL;
	int i;

	if (!cdl->len)
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&cdl->lock);

	for (i = 0; i < cdl->len; i++) {
		struct host1x_memory_context *cd = &cdl->devs[i];

		/* only consider context devices behind the same IOMMU as the client */
		if (cd->dev.iommu->iommu_dev != dev->iommu->iommu_dev)
			continue;

		if (cd->owner == pid) {
			refcount_inc(&cd->ref);
			mutex_unlock(&cdl->lock);
			return cd;
		} else if (!cd->owner && !free) {
			free = cd;
		}
	}

	if (!free) {
		mutex_unlock(&cdl->lock);
		return ERR_PTR(-EBUSY);
	}

	refcount_set(&free->ref, 1);
	free->owner = get_pid(pid);

	mutex_unlock(&cdl->lock);

	return free;
}
EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);

void host1x_memory_context_get(struct host1x_memory_context *cd)
{
	refcount_inc(&cd->ref);
}
EXPORT_SYMBOL_GPL(host1x_memory_context_get);

void host1x_memory_context_put(struct host1x_memory_context *cd)
{
	struct host1x_memory_context_list *cdl = &cd->host->context_list;

	if (refcount_dec_and_mutex_lock(&cd->ref, &cdl->lock)) {
		put_pid(cd->owner);
		cd->owner = NULL;
		mutex_unlock(&cdl->lock);
	}
}
EXPORT_SYMBOL_GPL(host1x_memory_context_put);
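
/*
 * Illustrative sketch (not part of this file): how a client engine driver
 * might attach one of the memory contexts managed above to a userspace
 * submitter. The function and variable names are hypothetical; only the
 * three exported helpers and the dev/stream_id fields are taken from this
 * file. Guarded out so it is never built.
 */
#if 0
static int example_attach_context(struct host1x *host, struct device *engine,
				  struct pid *pid)
{
	struct host1x_memory_context *ctx;

	/* Reuses the process's existing context on this IOMMU, or claims a free one. */
	ctx = host1x_memory_context_alloc(host, engine, pid);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Buffers would now be mapped through ctx->dev and the engine
	 * programmed to emit ctx->stream_id, so its DMA is translated by
	 * the context device's IOMMU domain.
	 */

	/* Each successful alloc (or get) must be balanced by a put. */
	host1x_memory_context_put(ctx);

	return 0;
}
#endif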