/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Common HCI stuff
 */
  9. #ifndef HCI_H
  10. #define HCI_H
  11. #include <linux/io.h>
  12. /* Handy logging macro to save on line length */
  13. #define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
  14. /* 32-bit word aware bit and mask macros */
  15. #define W0_MASK(h, l) GENMASK((h) - 0, (l) - 0)
  16. #define W1_MASK(h, l) GENMASK((h) - 32, (l) - 32)
  17. #define W2_MASK(h, l) GENMASK((h) - 64, (l) - 64)
  18. #define W3_MASK(h, l) GENMASK((h) - 96, (l) - 96)
  19. /* Same for single bit macros (trailing _ to align with W*_MASK width) */
  20. #define W0_BIT_(x) BIT((x) - 0)
  21. #define W1_BIT_(x) BIT((x) - 32)
  22. #define W2_BIT_(x) BIT((x) - 64)
  23. #define W3_BIT_(x) BIT((x) - 96)
  24. #define reg_read(r) readl(hci->base_regs + (r))
  25. #define reg_write(r, v) writel(v, hci->base_regs + (r))
  26. #define reg_set(r, v) reg_write(r, reg_read(r) | (v))
  27. #define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
struct hci_cmd_ops;

/* Our main structure */
struct i3c_hci {
	struct i3c_master_controller master;	/* core I3C master instance */
	void __iomem *base_regs;	/* main HCI register block */
	void __iomem *DAT_regs;		/* Device Address Table region */
	void __iomem *DCT_regs;		/* Device Characteristic Table region */
	void __iomem *RHS_regs;		/* ring headers section (DMA mode) */
	void __iomem *PIO_regs;		/* PIO access area */
	void __iomem *EXTCAPS_regs;	/* extended capabilities area */
	void __iomem *AUTOCMD_regs;	/* auto-command area */
	void __iomem *DEBUG_regs;	/* debug registers */
	const struct hci_io_ops *io;	/* I/O backend: mipi_i3c_hci_pio or _dma */
	void *io_data;			/* private data owned by the io backend */
	const struct hci_cmd_ops *cmd;	/* command descriptor format handlers */
	atomic_t next_cmd_tid;		/* source for transaction IDs */
	u32 caps;			/* controller capability bits */
	unsigned int quirks;		/* HCI_QUIRK_* flags */
	unsigned int DAT_entries;	/* DAT geometry */
	unsigned int DAT_entry_size;
	void *DAT_data;			/* DAT management private data */
	unsigned int DCT_entries;	/* DCT geometry */
	unsigned int DCT_entry_size;
	u8 version_major;		/* HCI version reported by the hardware */
	u8 version_minor;
	u8 revision;
	u32 vendor_mipi_id;		/* vendor identification registers */
	u32 vendor_version_id;
	u32 vendor_product_id;
	void *vendor_data;		/* vendor-specific private data */
};
/*
 * Structure to represent a master initiated transfer.
 * The rnw, data and data_len fields must be initialized before calling any
 * hci->cmd->*() method. The cmd method will initialize cmd_desc[] and
 * possibly modify (clear) the data field. Then xfer->cmd_desc[0] can
 * be augmented with CMD_0_ROC and/or CMD_0_TOC.
 * The completion field needs to be initialized before queueing with
 * hci->io->queue_xfer(), and requires CMD_0_ROC to be set.
 */
struct hci_xfer {
	u32 cmd_desc[4];	/* command descriptor words */
	u32 response;		/* response descriptor word */
	bool rnw;		/* read-not-write: true for a read transfer */
	void *data;		/* data buffer (may be cleared by cmd methods) */
	unsigned int data_len;	/* data length in bytes */
	unsigned int cmd_tid;	/* transaction ID for this command */
	struct completion *completion;	/* completed when a response arrives */
	union {
		struct {
			/* PIO specific */
			struct hci_xfer *next_xfer;	/* next queued transfer */
			struct hci_xfer *next_data;	/* next needing FIFO data */
			struct hci_xfer *next_resp;	/* next awaiting a response */
			unsigned int data_left;		/* bytes still to transfer */
			u32 data_word_before_partial;	/* saved word preceding a partial one */
		};
		struct {
			/* DMA specific */
			dma_addr_t data_dma;	/* mapped DMA address of data */
			void *bounce_buf;	/* bounce buffer, when one is used */
			int ring_number;	/* ring the transfer was queued on */
			int ring_entry;		/* entry index within that ring */
		};
	};
};
  94. static inline struct hci_xfer *hci_alloc_xfer(unsigned int n)
  95. {
  96. return kcalloc(n, sizeof(struct hci_xfer), GFP_KERNEL);
  97. }
/*
 * Free a transfer array obtained from hci_alloc_xfer().
 * @n is unused (one kfree() releases the whole array) but is kept
 * to mirror the allocator's signature.
 */
static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n)
{
	kfree(xfer);
}
/* This abstracts PIO vs DMA operations */
struct hci_io_ops {
	/* service an interrupt; @mask carries status bits — NOTE(review):
	 * return value presumably means "interrupt was ours"; verify in backends */
	bool (*irq_handler)(struct i3c_hci *hci, unsigned int mask);
	/* submit @n transfers for execution */
	int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
	/* attempt to remove @n queued transfers (e.g. on timeout) */
	bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
	/* set up IBI handling for @dev according to @req */
	int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			   const struct i3c_ibi_setup *req);
	/* release IBI resources acquired by request_ibi() */
	void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
	/* return a consumed IBI slot to the backend */
	void (*recycle_ibi_slot)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
				 struct i3c_ibi_slot *slot);
	/* backend initialization */
	int (*init)(struct i3c_hci *hci);
	/* backend teardown */
	void (*cleanup)(struct i3c_hci *hci);
};
/* the two available I/O backends implementing hci_io_ops */
extern const struct hci_io_ops mipi_i3c_hci_pio;
extern const struct hci_io_ops mipi_i3c_hci_dma;

/* Our per device master private data */
struct i3c_hci_dev_data {
	int dat_idx;	/* this device's index in the DAT */
	void *ibi_data;	/* IBI state owned by the active io backend */
};
/* list of quirks */
#define HCI_QUIRK_RAW_CCC	BIT(1)	/* CCC framing must be explicit */
#define HCI_QUIRK_PIO_MODE	BIT(2)	/* Set PIO mode for AMD platforms */
#define HCI_QUIRK_OD_PP_TIMING	BIT(3)	/* Set OD and PP timings for AMD platforms */
#define HCI_QUIRK_RESP_BUF_THLD	BIT(4)	/* Set resp buf thld to 0 for AMD platforms */

/* global functions — implemented elsewhere in the driver */
void mipi_i3c_hci_resume(struct i3c_hci *hci);
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
void amd_set_od_pp_timing(struct i3c_hci *hci);
void amd_set_resp_buf_thld(struct i3c_hci *hci);

#endif /* HCI_H */