hns_roce_db.c

/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
 * Copyright (c) 2017 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
/*
 * Map a user-space doorbell record into the kernel. Records that live on
 * the same user page share a single pinned umem, tracked on the context's
 * page_list; the refcount is biased by one (a refcount_t cannot hold
 * zero), so it is always 1 + the number of active mappings.
 */
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db)
{
	struct hns_roce_user_db_page *page;
	int ret = 0;

	mutex_lock(&context->page_mutex);

	/* Reuse an already-pinned page if one covers this address. */
	list_for_each_entry(page, &context->page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&page->refcount, 1);
	page->user_virt = (virt & PAGE_MASK);
	/* Pin the whole user page that contains the doorbell record. */
	page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
				 PAGE_SIZE, 0, 0);
	if (IS_ERR(page->umem)) {
		ret = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->page_list);

found:
	/* DMA and kernel virtual addresses keep the record's in-page offset. */
	db->dma = sg_dma_address(page->umem->sg_head.sgl) +
		  (virt & ~PAGE_MASK);
	page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
	db->virt_addr = sg_virt(page->umem->sg_head.sgl);
	db->u.user_page = page;
	refcount_inc(&page->refcount);

out:
	mutex_unlock(&context->page_mutex);

	return ret;
}
EXPORT_SYMBOL(hns_roce_db_map_user);
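
/*
 * Usage sketch (hypothetical caller, not part of this file): a verbs
 * object carrying a user-space doorbell record would typically map it
 * while handling the create command and release it on destroy. The
 * names 'uctx', 'ucmd.db_addr' and 'hr_qp->rdb' are illustrative only:
 *
 *	ret = hns_roce_db_map_user(uctx, ucmd.db_addr, &hr_qp->rdb);
 *	if (ret)
 *		goto err_free;
 *	...
 *	hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
 */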

void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db)
{
	mutex_lock(&context->page_mutex);

	/*
	 * Drop this mapping's reference; if only the bias of 1 is left,
	 * refcount_dec_if_one() takes it to zero and the page can go.
	 */
	refcount_dec(&db->u.user_page->refcount);
	if (refcount_dec_if_one(&db->u.user_page->refcount)) {
		list_del(&db->u.user_page->list);
		ib_umem_release(db->u.user_page->umem);
		kfree(db->u.user_page);
	}

	mutex_unlock(&context->page_mutex);
}
EXPORT_SYMBOL(hns_roce_db_unmap_user);

/*
 * Allocate one DMA-coherent page of kernel doorbell records, managed as
 * a two-level buddy bitmap: 'order1' tracks free aligned pairs, 'order0'
 * free single records. A set bit means "free".
 */
static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
					struct device *dma_device)
{
	struct hns_roce_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
	if (!pgdir)
		return NULL;

	/* A fresh page is all pairs; order-0 slots appear only via splits. */
	bitmap_fill(pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					 &pgdir->db_dma, GFP_KERNEL);
	if (!pgdir->page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}
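
/*
 * For scale (an assumption for illustration, not spelled out here):
 * with 4 KB pages and the 4-byte record size implied by the '* 4'
 * scaling below, HNS_ROCE_DB_PER_PAGE works out to 1024 records per
 * pgdir page, so the order-1 bitmap tracks 512 aligned pairs.
 */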

static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
					struct hns_roce_db *db, int order)
{
	int o;
	int i;

	/* Try the requested order first, then split a larger chunk. */
	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o);
		if (i < HNS_ROCE_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	/* Convert the order-o bitmap index into a record index. */
	i <<= o;

	/* If we split a pair, publish the buddy at the requested order. */
	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index = i;
	db->db_record = pgdir->page + db->index;
	db->dma = pgdir->db_dma + db->index * 4;	/* 4-byte records */
	db->order = order;

	return 0;
}
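
/*
 * Worked example of the index math above: on an empty page, an order-0
 * request finds no free order-0 bit, so the loop settles on o = 1 and
 * find_first_bit() returns pair 0. clear_bit() claims the pair,
 * 'i <<= 1' turns pair index 0 into record index 0, and since o > order
 * the buddy record 'i ^ 1' = 1 is marked free in the order-0 bitmap.
 * This request gets index 0; the next order-0 request finds index 1
 * without splitting another pair.
 */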

int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order)
{
	struct hns_roce_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&hr_dev->pgdir_mutex);

	/* Carve from an existing page before allocating a new one. */
	list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)
		if (!hns_roce_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &hr_dev->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&hr_dev->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(hns_roce_alloc_db);
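
/*
 * Usage sketch (hypothetical kernel caller): a kernel-owned queue would
 * allocate its record doorbell once and hand db.db_record / db.dma to
 * the hardware context. 'hr_cq' and the order value are illustrative:
 *
 *	ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
 *	if (ret)
 *		return ret;
 *	*hr_cq->db.db_record = 0;
 *	...
 *	hns_roce_free_db(hr_dev, &hr_cq->db);
 */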

void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
{
	int o;
	int i;

	mutex_lock(&hr_dev->pgdir_mutex);

	o = db->order;
	i = db->index;

	/*
	 * Buddy merge: if the neighbouring record is free too, recombine
	 * the two and free them as an order-1 pair instead.
	 */
	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}

	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	/* Every pair is free again: hand the whole page back. */
	if (bitmap_full(db->u.pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2)) {
		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
				  db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&hr_dev->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(hns_roce_free_db);