i40iw_pble.c

/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*      copyright notice, this list of conditions and the following
*      disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials
*      provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include "i40iw_pble.h"
#include "i40iw.h"

struct i40iw_device;
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc);
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);

/**
 * i40iw_destroy_pble_pool - destroy pool during module unload
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 */
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct list_head *clist;
        struct list_head *tlist;
        struct i40iw_chunk *chunk;
        struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;

        if (pinfo->pool) {
                list_for_each_safe(clist, tlist, &pinfo->clist) {
                        chunk = list_entry(clist, struct i40iw_chunk, list);
                        if (chunk->type == I40IW_VMALLOC)
                                i40iw_free_vmalloc_mem(dev->hw, chunk);
                        kfree(chunk);
                }
                gen_pool_destroy(pinfo->pool);
        }
}

/**
 * i40iw_hmc_init_pble - Initialize pble resources during module load
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 */
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
                                           struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct i40iw_hmc_info *hmc_info;
        u32 fpm_idx = 0;

        hmc_info = dev->hmc_info;
        pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
        /* Now start the pbles on a 4K boundary */
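        /* each pble is an 8-byte entry, so the leftover bytes up to the
         * next 4K boundary convert to a pble count with a shift right by 3
         */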
        if (pble_rsrc->fpm_base_addr & 0xfff)
                fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;

        pble_rsrc->unallocated_pble =
            hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
        pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);

        pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
        pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
        INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
        if (!pble_rsrc->pinfo.pool)
                goto error;

        if (add_pble_pool(dev, pble_rsrc))
                goto error;

        return 0;

error:
        i40iw_destroy_pble_pool(dev, pble_rsrc);
        return I40IW_ERR_NO_MEMORY;
}

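/*
 * Hypothetical lifecycle sketch (illustrative only, not part of this file):
 * a driver would call i40iw_hmc_init_pble() once at load, after the HMC
 * objects are configured, and pair it with i40iw_destroy_pble_pool() at
 * unload:
 *
 *      if (i40iw_hmc_init_pble(dev, pble_rsrc))
 *              goto err_pble;
 *      ...
 *      i40iw_destroy_pble_pool(dev, pble_rsrc);
 */
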
/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                 struct sd_pd_idx *idx)
{
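        /* an sd maps a 2M direct backing page and a pd maps a 4K page,
         * so each sd spans I40IW_HMC_PD_CNT_IN_SD (512) pds; rel_pd_idx
         * is the pd offset within the current sd
         */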
        idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
        idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
        idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
}

/**
 * add_sd_direct - add sd direct for pble
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_add_page_info *info)
{
        enum i40iw_status_code ret_code = 0;
        struct sd_pd_idx *idx = &info->idx;
        struct i40iw_chunk *chunk = info->chunk;
        struct i40iw_hmc_info *hmc_info = info->hmc_info;
        struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
        u32 offset = 0;

        if (!sd_entry->valid) {
                if (dev->is_pf) {
                        ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
                                                            info->idx.sd_idx,
                                                            I40IW_SD_TYPE_DIRECT,
                                                            I40IW_HMC_DIRECT_BP_SIZE);
                        if (ret_code)
                                return ret_code;
                        chunk->type = I40IW_DMA_COHERENT;
                }
        }
        offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
        chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
        chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
                    chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
        return 0;
}

/**
 * i40iw_free_vmalloc_mem - free vmalloc during close
 * @hw: hw struct
 * @chunk: chunk information for vmalloc
 */
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
{
        struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
        int i;

        if (!chunk->pg_cnt)
                goto done;
        for (i = 0; i < chunk->pg_cnt; i++)
                dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);

done:
        kfree(chunk->dmaaddrs);
        chunk->dmaaddrs = NULL;
        vfree(chunk->vaddr);
        chunk->vaddr = NULL;
        chunk->type = 0;
}

/**
 * i40iw_get_vmalloc_mem - get 2M page for sd
 * @hw: hardware address
 * @chunk: chunk to add
 * @pg_cnt: # of 4K pages
 */
static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
                                                    struct i40iw_chunk *chunk,
                                                    int pg_cnt)
{
        struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
        struct page *page;
        u8 *addr;
        u32 size;
        int i;

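        /* one dma address per 4K page; pg_cnt << 3 assumes an 8-byte
         * dma_addr_t
         */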
        chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
        if (!chunk->dmaaddrs)
                return I40IW_ERR_NO_MEMORY;
        size = PAGE_SIZE * pg_cnt;
        chunk->vaddr = vmalloc(size);
        if (!chunk->vaddr) {
                kfree(chunk->dmaaddrs);
                chunk->dmaaddrs = NULL;
                return I40IW_ERR_NO_MEMORY;
        }
        chunk->size = size;
        addr = (u8 *)chunk->vaddr;
        for (i = 0; i < pg_cnt; i++) {
                page = vmalloc_to_page((void *)addr);
                if (!page)
                        break;
                chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
                                                  PAGE_SIZE, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
                        break;
                addr += PAGE_SIZE;
        }

        chunk->pg_cnt = i;
        chunk->type = I40IW_VMALLOC;
        if (i == pg_cnt)
                return 0;

        i40iw_free_vmalloc_mem(hw, chunk);
        return I40IW_ERR_NO_MEMORY;
}

/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
        return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
}

/**
 * add_bp_pages - add backing pages for sd
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
                                           struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                           struct i40iw_add_page_info *info)
{
        u8 *addr;
        struct i40iw_dma_mem mem;
        struct i40iw_hmc_pd_entry *pd_entry;
        struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
        struct i40iw_hmc_info *hmc_info = info->hmc_info;
        struct i40iw_chunk *chunk = info->chunk;
        struct i40iw_manage_vf_pble_info vf_pble_info;
        enum i40iw_status_code status = 0;
        u32 rel_pd_idx = info->idx.rel_pd_idx;
        u32 pd_idx = info->idx.pd_idx;
        u32 i;

        status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
        if (status)
                return I40IW_ERR_NO_MEMORY;
        status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
                                          info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
                                          I40IW_HMC_DIRECT_BP_SIZE);
        if (status)
                goto error;
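
        /* a VF cannot program HMC objects itself; it asks the PF over the
         * virtual channel to allocate the pble backing for this range
         * (pages << PBLE_512_SHIFT = pble count, 512 pbles per page)
         */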
        if (!dev->is_pf) {
                status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
                                                     fpm_to_idx(pble_rsrc,
                                                                pble_rsrc->next_fpm_addr),
                                                     (info->pages << PBLE_512_SHIFT));
                if (status) {
                        i40iw_pr_err("allocate PBLEs in the PF. Error %i\n", status);
                        goto error;
                }
        }

        addr = chunk->vaddr;
        for (i = 0; i < info->pages; i++) {
                mem.pa = chunk->dmaaddrs[i];
                mem.size = PAGE_SIZE;
                mem.va = (void *)(addr);
                pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
                if (!pd_entry->valid) {
                        status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
                        if (status)
                                goto error;
                        addr += PAGE_SIZE;
                } else {
                        i40iw_pr_err("pd entry is valid expecting to be invalid\n");
                }
        }
        if (!dev->is_pf) {
                vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
                vf_pble_info.inv_pd_ent = false;
                vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
                vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
                vf_pble_info.sd_index = info->idx.sd_idx;
                status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
                                                    &vf_pble_info, true);
                if (status) {
                        i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
                        goto error;
                }
        }
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        return 0;

error:
        i40iw_free_vmalloc_mem(dev->hw, chunk);
        return status;
}

/**
 * add_pble_pool - add an sd entry for the pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 */
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct i40iw_hmc_sd_entry *sd_entry;
        struct i40iw_hmc_info *hmc_info;
        struct i40iw_chunk *chunk;
        struct i40iw_add_page_info info;
        struct sd_pd_idx *idx = &info.idx;
        enum i40iw_status_code ret_code = 0;
        enum i40iw_sd_entry_type sd_entry_type;
        u64 sd_reg_val = 0;
        u32 pages;

        if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
                return I40IW_ERR_NO_MEMORY;
        if (pble_rsrc->next_fpm_addr & 0xfff) {
                i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
                return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
        }
        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (!chunk)
                return I40IW_ERR_NO_MEMORY;
        hmc_info = dev->hmc_info;
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        get_sd_pd_idx(pble_rsrc, idx);
        sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
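        /* claim the rest of the current sd (or a whole sd when aligned),
         * but never more pages than the remaining unallocated pbles can
         * fill (PBLE_512_SHIFT: 512 pbles per 4K page)
         */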
        pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
                        idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
        pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
        info.chunk = chunk;
        info.hmc_info = hmc_info;
        info.pages = pages;
        info.sd_entry = sd_entry;
        if (!sd_entry->valid) {
                sd_entry_type = (!idx->rel_pd_idx &&
                                 (pages == I40IW_HMC_PD_CNT_IN_SD) &&
                                 dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
        } else {
                sd_entry_type = sd_entry->entry_type;
        }
        i40iw_debug(dev, I40IW_DEBUG_PBLE,
                    "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
                    pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
                    sd_entry_type, sd_entry->valid);

        if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
                ret_code = add_sd_direct(dev, pble_rsrc, &info);
        if (ret_code)
                sd_entry_type = I40IW_SD_TYPE_PAGED;
        else
                pble_rsrc->stats_direct_sds++;

        if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
                ret_code = add_bp_pages(dev, pble_rsrc, &info);
                if (ret_code)
                        goto error;
                else
                        pble_rsrc->stats_paged_sds++;
        }
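
        /* register the chunk with the gen_pool: the kernel virtual address
         * is the allocatable range and the fpm address is recorded as its
         * "physical" counterpart, so gen_pool_virt_to_phys() can later map
         * an allocation back to an fpm index
         */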
        if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
                              (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
                i40iw_pr_err("could not allocate memory by gen_pool_add_virt()\n");
                ret_code = I40IW_ERR_NO_MEMORY;
                goto error;
        }
        pble_rsrc->next_fpm_addr += chunk->size;
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
                    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
        pble_rsrc->unallocated_pble -= (chunk->size >> 3);
        sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
                        sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
        if (dev->is_pf && !sd_entry->valid) {
                ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
                                            sd_reg_val, idx->sd_idx,
                                            sd_entry->entry_type, true);
                if (ret_code) {
                        i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
                        goto error;
                }
        }

        sd_entry->valid = true;
        list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        return 0;

error:
        kfree(chunk);
        return ret_code;
}

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                      struct i40iw_pble_alloc *palloc)
{
        u32 i;
        struct gen_pool *pool;
        struct i40iw_pble_level2 *lvl2 = &palloc->level2;
        struct i40iw_pble_info *root = &lvl2->root;
        struct i40iw_pble_info *leaf = lvl2->leaf;

        pool = pble_rsrc->pinfo.pool;

        for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
                if (leaf->addr)
                        gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
                else
                        break;
        }

        if (root->addr)
                gen_pool_free(pool, root->addr, (root->cnt << 3));

        kfree(lvl2->leaf);
        lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 * @pool: pool pointer
 */
static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_pble_alloc *palloc,
                                            struct gen_pool *pool)
{
        u32 lf4k, lflast, total, i;
        u32 pblcnt = PBLE_PER_PAGE;
        u64 *addr;
        struct i40iw_pble_level2 *lvl2 = &palloc->level2;
        struct i40iw_pble_info *root = &lvl2->root;
        struct i40iw_pble_info *leaf;

        /* number of full 512 (4K) leaves */
        lf4k = palloc->total_cnt >> 9;
        lflast = palloc->total_cnt % PBLE_PER_PAGE;
        total = (lflast == 0) ? lf4k : lf4k + 1;
        lvl2->leaf_cnt = total;

        leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
        if (!leaf)
                return I40IW_ERR_NO_MEMORY;
        lvl2->leaf = leaf;
        /* allocate pbles for the root */
        root->addr = gen_pool_alloc(pool, (total << 3));
        if (!root->addr) {
                kfree(lvl2->leaf);
                lvl2->leaf = NULL;
                return I40IW_ERR_NO_MEMORY;
        }
        root->idx = fpm_to_idx(pble_rsrc,
                               (u64)gen_pool_virt_to_phys(pool, root->addr));
        root->cnt = total;
        addr = (u64 *)root->addr;
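
        /* fill the root page: each 64-bit root entry holds the fpm index
         * of one leaf page; the last leaf may hold fewer than 512 pbles
         */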
        for (i = 0; i < total; i++, leaf++) {
                pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
                leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
                if (!leaf->addr)
                        goto error;
                leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));

                leaf->cnt = pblcnt;
                *addr = (u64)leaf->idx;
                addr++;
        }
        palloc->level = I40IW_LEVEL_2;
        pble_rsrc->stats_lvl2++;
        return 0;

error:
        free_lvl2(pble_rsrc, palloc);
        return I40IW_ERR_NO_MEMORY;
}

/**
 * get_lvl1_pble - get level 1 pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_pble_alloc *palloc)
{
        u64 *addr;
        struct gen_pool *pool;
        struct i40iw_pble_info *lvl1 = &palloc->level1;

        pool = pble_rsrc->pinfo.pool;
        addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));
        if (!addr)
                return I40IW_ERR_NO_MEMORY;

        palloc->level = I40IW_LEVEL_1;
        lvl1->addr = (unsigned long)addr;
        lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
                               (unsigned long)addr));
        lvl1->cnt = palloc->total_cnt;
        pble_rsrc->stats_lvl1++;
        return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pool: pointer to general purpose special memory pool descriptor
 */
static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
                                                        struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                                        struct i40iw_pble_alloc *palloc,
                                                        struct gen_pool *pool)
{
        enum i40iw_status_code status = 0;

        status = get_lvl1_pble(dev, pble_rsrc, palloc);
        if (status && (palloc->total_cnt > PBLE_PER_PAGE))
                status = get_lvl2_pble(pble_rsrc, palloc, pool);
        return status;
}

/**
 * i40iw_get_pble - allocate pbles from the pool
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: # of pbles requested
 */
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
                                      struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                      struct i40iw_pble_alloc *palloc,
                                      u32 pble_cnt)
{
        struct gen_pool *pool;
        enum i40iw_status_code status = 0;
        u32 max_sds = 0;
        int i;

        pool = pble_rsrc->pinfo.pool;
        palloc->total_cnt = pble_cnt;
        palloc->level = I40IW_LEVEL_0;
        /* check first to see if we can get pbles without acquiring additional sds */
        status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
        if (!status)
                goto exit;
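
        /* one sd maps 2^18 pbles (512 pbles per 4K page * 512 pages per
         * 2M sd), so total_cnt >> 18 (+1 for any remainder) bounds how
         * many new sds could be needed to satisfy the request
         */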
        max_sds = (palloc->total_cnt >> 18) + 1;
        for (i = 0; i < max_sds; i++) {
                status = add_pble_pool(dev, pble_rsrc);
                if (status)
                        break;
                status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
                if (!status)
                        break;
        }

exit:
        if (!status)
                pble_rsrc->stats_alloc_ok++;
        else
                pble_rsrc->stats_alloc_fail++;
        return status;
}

/**
 * i40iw_free_pble - put pbles back into pool
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                     struct i40iw_pble_alloc *palloc)
{
        struct gen_pool *pool;

        pool = pble_rsrc->pinfo.pool;
        if (palloc->level == I40IW_LEVEL_2)
                free_lvl2(pble_rsrc, palloc);
        else
                gen_pool_free(pool, palloc->level1.addr,
                              (palloc->level1.cnt << 3));
        pble_rsrc->stats_alloc_freed++;
}
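
/*
 * Hypothetical usage sketch (illustrative only, not part of this file):
 * a caller that needs a physical buffer list for an MR or QP would pair
 * i40iw_get_pble() with i40iw_free_pble(); the name pbl_depth below is
 * assumed for illustration.
 *
 *      struct i40iw_pble_alloc palloc = {};
 *
 *      if (i40iw_get_pble(dev, pble_rsrc, &palloc, pbl_depth))
 *              return I40IW_ERR_NO_MEMORY;
 *      // level 1: program palloc.level1.idx into the wqe/mr;
 *      // level 2: program palloc.level2.root.idx instead
 *      ...
 *      i40iw_free_pble(pble_rsrc, &palloc);
 */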