/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */
  5. #ifndef __IVPU_IPC_H__
  6. #define __IVPU_IPC_H__
  7. #include <linux/interrupt.h>
  8. #include <linux/spinlock.h>
  9. #include "vpu_jsm_api.h"
  10. struct ivpu_bo;
  11. /* VPU FW boot notification */
  12. #define IVPU_IPC_CHAN_BOOT_MSG 0x3ff
  13. #define IVPU_IPC_BOOT_MSG_DATA_ADDR 0x424f4f54
  14. /* The alignment to be used for IPC Buffers and IPC Data. */
  15. #define IVPU_IPC_ALIGNMENT 64
  16. #define IVPU_IPC_HDR_FREE 0
  17. #define IVPU_IPC_HDR_ALLOCATED 1
  18. /**
  19. * struct ivpu_ipc_hdr - The IPC message header structure, exchanged
  20. * with the VPU device firmware.
  21. * @data_addr: The VPU address of the payload (JSM message)
  22. * @data_size: The size of the payload.
  23. * @channel: The channel used.
  24. * @src_node: The Node ID of the sender.
  25. * @dst_node: The Node ID of the intended receiver.
  26. * @status: IPC buffer usage status
  27. */
  28. struct ivpu_ipc_hdr {
  29. u32 data_addr;
  30. u32 data_size;
  31. u16 channel;
  32. u8 src_node;
  33. u8 dst_node;
  34. u8 status;
  35. } __packed __aligned(IVPU_IPC_ALIGNMENT);
  36. typedef void (*ivpu_ipc_rx_callback_t)(struct ivpu_device *vdev,
  37. struct ivpu_ipc_hdr *ipc_hdr,
  38. struct vpu_jsm_msg *jsm_msg);
  39. struct ivpu_ipc_rx_msg {
  40. struct list_head link;
  41. struct ivpu_ipc_hdr *ipc_hdr;
  42. struct vpu_jsm_msg *jsm_msg;
  43. ivpu_ipc_rx_callback_t callback;
  44. };
  45. struct ivpu_ipc_consumer {
  46. struct list_head link;
  47. u32 channel;
  48. u32 tx_vpu_addr;
  49. u32 request_id;
  50. bool aborted;
  51. ivpu_ipc_rx_callback_t rx_callback;
  52. spinlock_t rx_lock; /* Protects rx_msg_list and aborted */
  53. struct list_head rx_msg_list;
  54. wait_queue_head_t rx_msg_wq;
  55. };
  56. struct ivpu_ipc_info {
  57. struct gen_pool *mm_tx;
  58. struct ivpu_bo *mem_tx;
  59. struct ivpu_bo *mem_rx;
  60. atomic_t rx_msg_count;
  61. spinlock_t cons_lock; /* Protects cons_list and cb_msg_list */
  62. struct list_head cons_list;
  63. struct list_head cb_msg_list;
  64. atomic_t request_id;
  65. struct mutex lock; /* Lock on status */
  66. bool on;
  67. };
  68. int ivpu_ipc_init(struct ivpu_device *vdev);
  69. void ivpu_ipc_fini(struct ivpu_device *vdev);
  70. void ivpu_ipc_enable(struct ivpu_device *vdev);
  71. void ivpu_ipc_disable(struct ivpu_device *vdev);
  72. void ivpu_ipc_reset(struct ivpu_device *vdev);
  73. void ivpu_ipc_irq_handler(struct ivpu_device *vdev);
  74. void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
  75. void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
  76. u32 channel, ivpu_ipc_rx_callback_t callback);
  77. void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);
  78. int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
  79. struct vpu_jsm_msg *req);
  80. int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
  81. struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
  82. unsigned long timeout_ms);
  83. int ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
  84. enum vpu_ipc_msg_type expected_resp_type,
  85. struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms);
  86. int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
  87. enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
  88. u32 channel, unsigned long timeout_ms);
  89. #endif /* __IVPU_IPC_H__ */