cptvf_reqmanager.c

/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include "cptvf.h"
#include "request_manager.h"

/**
 * get_free_pending_entry - get a free entry from the pending queue
 * @q: pending queue
 * @qlen: queue length
 */
static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
						    int qlen)
{
	struct pending_entry *ent = NULL;

	ent = &q->head[q->rear];
	if (unlikely(ent->busy)) {
		ent = NULL;
		goto no_free_entry;
	}

	q->rear++;
	if (unlikely(q->rear == qlen))
		q->rear = 0;

no_free_entry:
	return ent;
}
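
/**
 * pending_queue_inc_front - advance the front index of a pending queue,
 * wrapping back to zero when it reaches the end of the queue
 * @pqinfo: pending queue info structure
 * @qno: pending queue number
 */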
static inline void pending_queue_inc_front(struct pending_qinfo *pqinfo,
					   int qno)
{
	struct pending_queue *queue = &pqinfo->queue[qno];

	queue->front++;
	if (unlikely(queue->front == pqinfo->qlen))
		queue->front = 0;
}
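
/**
 * setup_sgio_components - DMA-map each buffer in @list and pack the mapped
 * addresses and lengths into hardware scatter-gather components, four
 * buffers per component
 * @cptvf: virtual function device
 * @list: buffer pointer list to map
 * @buf_count: number of buffers in @list
 * @buffer: destination for the packed sglist components
 */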
static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list,
				 int buf_count, u8 *buffer)
{
	int ret = 0, i, j;
	int components;
	struct sglist_component *sg_ptr = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(!list)) {
		dev_err(&pdev->dev, "Input List pointer is NULL\n");
		return -EFAULT;
	}

	for (i = 0; i < buf_count; i++) {
		if (likely(list[i].vptr)) {
			list[i].dma_addr = dma_map_single(&pdev->dev,
							  list[i].vptr,
							  list[i].size,
							  DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(&pdev->dev,
						       list[i].dma_addr))) {
				dev_err(&pdev->dev, "DMA map kernel buffer failed for component: %d\n",
					i);
				ret = -EIO;
				goto sg_cleanup;
			}
		}
	}

	components = buf_count / 4;
	sg_ptr = (struct sglist_component *)buffer;
	for (i = 0; i < components; i++) {
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
		sg_ptr++;
	}

	components = buf_count % 4;
	switch (components) {
	case 3:
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		/* Fall through */
	case 2:
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		/* Fall through */
	case 1:
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		break;
	default:
		break;
	}

	return ret;

sg_cleanup:
	for (j = 0; j < i; j++) {
		if (list[j].dma_addr) {
			dma_unmap_single(&pdev->dev, list[j].dma_addr,
					 list[j].size, DMA_BIDIRECTIONAL);
		}

		list[j].dma_addr = 0;
	}

	return ret;
}
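
/**
 * setup_sgio_list - build the gather and scatter lists for a request and
 * DMA-map the resulting DPTR (input) and RPTR (completion) buffers
 * @cptvf: virtual function device
 * @info: per-request bookkeeping buffer
 * @req: request describing the input and output buffers
 */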
static inline int setup_sgio_list(struct cpt_vf *cptvf,
				  struct cpt_info_buffer *info,
				  struct cpt_request_info *req)
{
	u16 g_sz_bytes = 0, s_sz_bytes = 0;
	int ret = 0;
	struct pci_dev *pdev = cptvf->pdev;

	if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) {
		dev_err(&pdev->dev, "Request SG components are higher than supported\n");
		ret = -EINVAL;
		goto scatter_gather_clean;
	}

	/* Setup gather (input) components */
	g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
	info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->gather_components) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	ret = setup_sgio_components(cptvf, req->in,
				    req->incnt,
				    info->gather_components);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup gather list\n");
		ret = -EFAULT;
		goto scatter_gather_clean;
	}

	/* Setup scatter (output) components */
	s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
	info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->scatter_components) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	ret = setup_sgio_components(cptvf, req->out,
				    req->outcnt,
				    info->scatter_components);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup scatter list\n");
		ret = -EFAULT;
		goto scatter_gather_clean;
	}

	/* Create and initialize DPTR */
	info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
	info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->in_buffer) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	((u16 *)info->in_buffer)[0] = req->outcnt;
	((u16 *)info->in_buffer)[1] = req->incnt;
	((u16 *)info->in_buffer)[2] = 0;
	((u16 *)info->in_buffer)[3] = 0;
	*(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer);

	memcpy(&info->in_buffer[8], info->gather_components,
	       g_sz_bytes);
	memcpy(&info->in_buffer[8 + g_sz_bytes],
	       info->scatter_components, s_sz_bytes);

	info->dptr_baddr = dma_map_single(&pdev->dev,
					  (void *)info->in_buffer,
					  info->dlen,
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->dptr_baddr)) {
		dev_err(&pdev->dev, "Mapping DPTR Failed %d\n", info->dlen);
		ret = -EIO;
		goto scatter_gather_clean;
	}

	/* Create and initialize RPTR */
	info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->out_buffer) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	*((u64 *)info->out_buffer) = ~((u64)COMPLETION_CODE_INIT);
	info->alternate_caddr = (u64 *)info->out_buffer;
	info->rptr_baddr = dma_map_single(&pdev->dev,
					  (void *)info->out_buffer,
					  COMPLETION_CODE_SIZE,
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->rptr_baddr)) {
		dev_err(&pdev->dev, "Mapping RPTR Failed %d\n",
			COMPLETION_CODE_SIZE);
		ret = -EIO;
		goto scatter_gather_clean;
	}

	return 0;

scatter_gather_clean:
	return ret;
}
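
/**
 * send_cpt_command - copy a CPT instruction into the VF command queue and
 * ring the doorbell, moving on to the next command chunk when the current
 * one is full
 * @cptvf: virtual function device
 * @cmd: CPT instruction to enqueue
 * @qno: command queue number
 */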
int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
		     u32 qno)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct command_qinfo *qinfo = NULL;
	struct command_queue *queue;
	struct command_chunk *chunk;
	u8 *ent;
	int ret = 0;

	if (unlikely(qno >= cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Invalid queue (qno: %d, nr_queues: %d)\n",
			qno, cptvf->nr_queues);
		return -EINVAL;
	}

	qinfo = &cptvf->cqinfo;
	queue = &qinfo->queue[qno];
	/* lock command queue */
	spin_lock(&queue->lock);
	ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
	memcpy(ent, (void *)cmd, qinfo->cmd_size);

	if (++queue->idx >= queue->qhead->size / 64) {
		struct hlist_node *node;

		hlist_for_each(node, &queue->chead) {
			chunk = hlist_entry(node, struct command_chunk,
					    nextchunk);
			if (chunk == queue->qhead) {
				continue;
			} else {
				queue->qhead = chunk;
				break;
			}
		}
		queue->idx = 0;
	}
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();
	cptvf_write_vq_doorbell(cptvf, 1);
	/* unlock command queue */
	spin_unlock(&queue->lock);

	return ret;
}
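
/**
 * do_request_cleanup - unmap all DMA buffers associated with a request and
 * free its per-request allocations
 * @cptvf: virtual function device
 * @info: per-request bookkeeping buffer to tear down
 */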
void do_request_cleanup(struct cpt_vf *cptvf,
			struct cpt_info_buffer *info)
{
	int i;
	struct pci_dev *pdev = cptvf->pdev;
	struct cpt_request_info *req;

	if (info->dptr_baddr)
		dma_unmap_single(&pdev->dev, info->dptr_baddr,
				 info->dlen, DMA_BIDIRECTIONAL);

	if (info->rptr_baddr)
		dma_unmap_single(&pdev->dev, info->rptr_baddr,
				 COMPLETION_CODE_SIZE, DMA_BIDIRECTIONAL);

	if (info->comp_baddr)
		dma_unmap_single(&pdev->dev, info->comp_baddr,
				 sizeof(union cpt_res_s), DMA_BIDIRECTIONAL);

	if (info->req) {
		req = info->req;
		for (i = 0; i < req->outcnt; i++) {
			if (req->out[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->out[i].dma_addr,
						 req->out[i].size,
						 DMA_BIDIRECTIONAL);
		}

		for (i = 0; i < req->incnt; i++) {
			if (req->in[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->in[i].dma_addr,
						 req->in[i].size,
						 DMA_BIDIRECTIONAL);
		}
	}

	if (info->scatter_components)
		kzfree(info->scatter_components);

	if (info->gather_components)
		kzfree(info->gather_components);

	if (info->out_buffer)
		kzfree(info->out_buffer);

	if (info->in_buffer)
		kzfree(info->in_buffer);

	if (info->completion_addr)
		kzfree((void *)info->completion_addr);

	kzfree(info);
}
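
/**
 * do_post_process - clean up a serviced request
 * @cptvf: virtual function device
 * @info: per-request bookkeeping buffer
 */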
void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!info) {
		dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n");
		return;
	}

	do_request_cleanup(cptvf, info);
}
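
/**
 * process_pending_queue - walk a pending queue from the front, reaping
 * completed, failed and timed-out requests, cleaning them up and invoking
 * their completion callbacks
 * @cptvf: virtual function device
 * @pqinfo: pending queue info structure
 * @qno: pending queue number
 */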
static inline void process_pending_queue(struct cpt_vf *cptvf,
					 struct pending_qinfo *pqinfo,
					 int qno)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct pending_queue *pqueue = &pqinfo->queue[qno];
	struct pending_entry *pentry = NULL;
	struct cpt_info_buffer *info = NULL;
	union cpt_res_s *status = NULL;
	unsigned char ccode;

	while (1) {
		spin_lock_bh(&pqueue->lock);
		pentry = &pqueue->head[pqueue->front];
		if (unlikely(!pentry->busy)) {
			spin_unlock_bh(&pqueue->lock);
			break;
		}

		info = (struct cpt_info_buffer *)pentry->post_arg;
		if (unlikely(!info)) {
			dev_err(&pdev->dev, "Pending Entry post arg NULL\n");
			pending_queue_inc_front(pqinfo, qno);
			spin_unlock_bh(&pqueue->lock);
			continue;
		}

		status = (union cpt_res_s *)pentry->completion_addr;
		ccode = status->s.compcode;
		if ((status->s.compcode == CPT_COMP_E_FAULT) ||
		    (status->s.compcode == CPT_COMP_E_SWERR)) {
			dev_err(&pdev->dev, "Request failed with %s\n",
				(status->s.compcode == CPT_COMP_E_FAULT) ?
				"DMA Fault" : "Software error");
			pentry->completion_addr = NULL;
			pentry->busy = false;
			atomic64_dec((&pqueue->pending_count));
			pentry->post_arg = NULL;
			pending_queue_inc_front(pqinfo, qno);
			do_request_cleanup(cptvf, info);
			spin_unlock_bh(&pqueue->lock);
			break;
		} else if (status->s.compcode == COMPLETION_CODE_INIT) {
			/* check for timeout */
			if (time_after_eq(jiffies,
					  (info->time_in +
					   (CPT_COMMAND_TIMEOUT * HZ)))) {
				dev_err(&pdev->dev, "Request timed out");
				pentry->completion_addr = NULL;
				pentry->busy = false;
				atomic64_dec((&pqueue->pending_count));
				pentry->post_arg = NULL;
				pending_queue_inc_front(pqinfo, qno);
				do_request_cleanup(cptvf, info);
				spin_unlock_bh(&pqueue->lock);
				break;
			} else if ((*info->alternate_caddr ==
				    (~COMPLETION_CODE_INIT)) &&
				   (info->extra_time < TIME_IN_RESET_COUNT)) {
				info->time_in = jiffies;
				info->extra_time++;
				spin_unlock_bh(&pqueue->lock);
				break;
			}
		}

		pentry->completion_addr = NULL;
		pentry->busy = false;
		pentry->post_arg = NULL;
		atomic64_dec((&pqueue->pending_count));
		pending_queue_inc_front(pqinfo, qno);
		spin_unlock_bh(&pqueue->lock);

		do_post_process(info->cptvf, info);
		/*
		 * Calling callback after we find
		 * that the request has been serviced
		 */
		pentry->callback(ccode, pentry->callback_arg);
	}
}
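
/**
 * process_request - build the scatter-gather lists and CPT instruction for
 * a request, track it in the pending queue and submit it to hardware
 * @cptvf: virtual function device
 * @req: request to submit
 */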
int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
{
	int ret = 0, clear = 0, queue = 0;
	struct cpt_info_buffer *info = NULL;
	struct cptvf_request *cpt_req = NULL;
	union ctrl_info *ctrl = NULL;
	union cpt_res_s *result = NULL;
	struct pending_entry *pentry = NULL;
	struct pending_queue *pqueue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	u8 group = 0;
	struct cpt_vq_command vq_cmd;
	union cpt_inst_s cptinst;

	info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info)) {
		dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
		return -ENOMEM;
	}

	cpt_req = (struct cptvf_request *)&req->req;
	ctrl = (union ctrl_info *)&req->ctrl;

	info->cptvf = cptvf;
	group = ctrl->s.grp;
	ret = setup_sgio_list(cptvf, info, req);
	if (ret) {
		dev_err(&pdev->dev, "Setting up SG list failed");
		goto request_cleanup;
	}

	cpt_req->dlen = info->dlen;
	/*
	 * Get buffer for union cpt_res_s response
	 * structure and its physical address
	 */
	info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info->completion_addr)) {
		dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
		ret = -ENOMEM;
		goto request_cleanup;
	}

	result = (union cpt_res_s *)info->completion_addr;
	result->s.compcode = COMPLETION_CODE_INIT;
	info->comp_baddr = dma_map_single(&pdev->dev,
					  (void *)info->completion_addr,
					  sizeof(union cpt_res_s),
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->comp_baddr)) {
		dev_err(&pdev->dev, "mapping compptr Failed %lu\n",
			sizeof(union cpt_res_s));
		ret = -EFAULT;
		goto request_cleanup;
	}

	/* Fill the VQ command */
	vq_cmd.cmd.u64 = 0;
	vq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
	vq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
	vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
	vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	vq_cmd.cmd.u64 = cpu_to_be64(vq_cmd.cmd.u64);
	vq_cmd.dptr = info->dptr_baddr;
	vq_cmd.rptr = info->rptr_baddr;
	vq_cmd.cptr.u64 = 0;
	vq_cmd.cptr.s.grp = group;
	/* Get Pending Entry to submit command */
	/* Always queue 0, because 1 queue per VF */
	queue = 0;
	pqueue = &cptvf->pqinfo.queue[queue];

	if (atomic64_read(&pqueue->pending_count) > PENDING_THOLD) {
		dev_err(&pdev->dev, "pending threshold reached\n");
		process_pending_queue(cptvf, &cptvf->pqinfo, queue);
	}

get_pending_entry:
	spin_lock_bh(&pqueue->lock);
	pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen);
	if (unlikely(!pentry)) {
		spin_unlock_bh(&pqueue->lock);
		if (clear == 0) {
			process_pending_queue(cptvf, &cptvf->pqinfo, queue);
			clear = 1;
			goto get_pending_entry;
		}
		dev_err(&pdev->dev, "Get free entry failed\n");
		dev_err(&pdev->dev, "queue: %d, rear: %d, front: %d\n",
			queue, pqueue->rear, pqueue->front);
		ret = -EFAULT;
		goto request_cleanup;
	}

	pentry->completion_addr = info->completion_addr;
	pentry->post_arg = (void *)info;
	pentry->callback = req->callback;
	pentry->callback_arg = req->callback_arg;
	info->pentry = pentry;
	pentry->busy = true;
	atomic64_inc(&pqueue->pending_count);

	/* Send CPT command */
	info->pentry = pentry;
	info->time_in = jiffies;
	info->req = req;

	/* Create the CPT_INST_S type command for HW interpretation */
	cptinst.s.doneint = true;
	cptinst.s.res_addr = (u64)info->comp_baddr;
	cptinst.s.tag = 0;
	cptinst.s.grp = 0;
	cptinst.s.wq_ptr = 0;
	cptinst.s.ei0 = vq_cmd.cmd.u64;
	cptinst.s.ei1 = vq_cmd.dptr;
	cptinst.s.ei2 = vq_cmd.rptr;
	cptinst.s.ei3 = vq_cmd.cptr.u64;

	ret = send_cpt_command(cptvf, &cptinst, queue);
	spin_unlock_bh(&pqueue->lock);
	if (unlikely(ret)) {
		dev_err(&pdev->dev, "Send command failed for AE\n");
		ret = -EFAULT;
		goto request_cleanup;
	}

	return 0;

request_cleanup:
	dev_dbg(&pdev->dev, "Failed to submit CPT command\n");
	do_request_cleanup(cptvf, info);

	return ret;
}
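
/**
 * vq_post_process - validate the pending queue number and reap completions
 * on that queue
 * @cptvf: virtual function device
 * @qno: pending queue number
 */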
void vq_post_process(struct cpt_vf *cptvf, u32 qno)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(qno > cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Request for post processing on invalid pending queue: %u\n",
			qno);
		return;
	}

	process_pending_queue(cptvf, &cptvf->pqinfo, qno);
}
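
/**
 * cptvf_do_request - entry point for submitting a request to this VF;
 * checks device readiness and that the request type (SE/AE) matches the
 * VF type before handing off to process_request()
 * @vfdev: cpt_vf device pointer
 * @req: request to submit
 */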
int cptvf_do_request(void *vfdev, struct cpt_request_info *req)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)vfdev;
	struct pci_dev *pdev = cptvf->pdev;

	if (!cpt_device_ready(cptvf)) {
		dev_err(&pdev->dev, "CPT Device is not ready");
		return -ENODEV;
	}

	if ((cptvf->vftype == SE_TYPES) && (!req->ctrl.s.se_req)) {
		dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
			cptvf->vfid);
		return -EINVAL;
	} else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) {
		dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
			cptvf->vfid);
		return -EINVAL;
	}

	return process_request(cptvf, req);
}