/* brcmstb_dpfe.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * DDR PHY Front End (DPFE) driver for Broadcom set top box SoCs
  4. *
  5. * Copyright (c) 2017 Broadcom
  6. */
  7. /*
  8. * This driver provides access to the DPFE interface of Broadcom STB SoCs.
  9. * The firmware running on the DCPU inside the DDR PHY can provide current
  10. * information about the system's RAM, for instance the DRAM refresh rate.
  11. * This can be used as an indirect indicator for the DRAM's temperature.
  12. * Slower refresh rate means cooler RAM, higher refresh rate means hotter
  13. * RAM.
  14. *
  15. * Throughout the driver, we use readl_relaxed() and writel_relaxed(), which
  16. * already contain the appropriate le32_to_cpu()/cpu_to_le32() calls.
  17. *
  18. * Note regarding the loading of the firmware image: we use be32_to_cpu()
  19. * and le32_to_cpu(), so we can support the following four cases:
  20. * - LE kernel + LE firmware image (the most common case)
  21. * - LE kernel + BE firmware image
  22. * - BE kernel + LE firmware image
  23. * - BE kernel + BE firmware image
  24. *
  25. * The DCPU always runs in big endian mode. The firmware image, however, can
  26. * be in either format. Also, communication between host CPU and DCPU is
  27. * always in little endian.
  28. */
  29. #include <linux/delay.h>
  30. #include <linux/firmware.h>
  31. #include <linux/io.h>
  32. #include <linux/module.h>
  33. #include <linux/of.h>
  34. #include <linux/platform_device.h>
  35. #define DRVNAME "brcmstb-dpfe"
/* DCPU register offsets */
#define REG_DCPU_RESET		0x0
#define REG_TO_DCPU_MBOX	0x10
#define REG_TO_HOST_MBOX	0x14

/*
 * Macros to process offsets returned by the DCPU: the low bits carry an
 * address offset, the top bits a message type (see get_msg_ptr()).
 */
#define DRAM_MSG_ADDR_OFFSET	0x0
#define DRAM_MSG_TYPE_OFFSET	0x1c
#define DRAM_MSG_ADDR_MASK	((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
#define DRAM_MSG_TYPE_MASK	((1UL << \
				 (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1)

/* Message RAM: shared memory area used to exchange commands with the DCPU */
#define DCPU_MSG_RAM_START	0x100
#define DCPU_MSG_RAM(x)		(DCPU_MSG_RAM_START + (x) * sizeof(u32))

/* DRAM Info Offsets & Masks */
#define DRAM_INFO_INTERVAL	0x0
#define DRAM_INFO_MR4		0x4
#define DRAM_INFO_ERROR		0x8
#define DRAM_INFO_MR4_MASK	0xff
#define DRAM_INFO_MR4_SHIFT	24	/* We need to look at byte 3 */

/* DRAM MR4 Offsets & Masks (bit positions within the MR4 byte) */
#define DRAM_MR4_REFRESH	0x0	/* Refresh rate */
#define DRAM_MR4_SR_ABORT	0x3	/* Self Refresh Abort */
#define DRAM_MR4_PPRE		0x4	/* Post-package repair entry/exit */
#define DRAM_MR4_TH_OFFS	0x5	/* Thermal Offset; vendor specific */
#define DRAM_MR4_TUF		0x7	/* Temperature Update Flag */

#define DRAM_MR4_REFRESH_MASK	0x7
#define DRAM_MR4_SR_ABORT_MASK	0x1
#define DRAM_MR4_PPRE_MASK	0x1
#define DRAM_MR4_TH_OFFS_MASK	0x3
#define DRAM_MR4_TUF_MASK	0x1

/* DRAM Vendor Offsets & Masks (API v2) */
#define DRAM_VENDOR_MR5		0x0
#define DRAM_VENDOR_MR6		0x4
#define DRAM_VENDOR_MR7		0x8
#define DRAM_VENDOR_MR8		0xc
#define DRAM_VENDOR_ERROR	0x10
#define DRAM_VENDOR_MASK	0xff
#define DRAM_VENDOR_SHIFT	24	/* We need to look at byte 3 */

/* DRAM Information Offsets & Masks (API v3) */
#define DRAM_DDR_INFO_MR4	0x0
#define DRAM_DDR_INFO_MR5	0x4
#define DRAM_DDR_INFO_MR6	0x8
#define DRAM_DDR_INFO_MR7	0xc
#define DRAM_DDR_INFO_MR8	0x10
#define DRAM_DDR_INFO_ERROR	0x14
#define DRAM_DDR_INFO_MASK	0xff

/* Reset register bits & masks */
#define DCPU_RESET_SHIFT	0x0
#define DCPU_RESET_MASK		0x1
#define DCPU_CLK_DISABLE_SHIFT	0x2

/* DCPU return codes (bit 31 set marks an error) */
#define DCPU_RET_ERROR_BIT	BIT(31)
#define DCPU_RET_SUCCESS	0x1
#define DCPU_RET_ERR_HEADER	(DCPU_RET_ERROR_BIT | BIT(0))
#define DCPU_RET_ERR_INVAL	(DCPU_RET_ERROR_BIT | BIT(1))
#define DCPU_RET_ERR_CHKSUM	(DCPU_RET_ERROR_BIT | BIT(2))
#define DCPU_RET_ERR_COMMAND	(DCPU_RET_ERROR_BIT | BIT(3))
/* This error code is not firmware defined and only used in the driver. */
#define DCPU_RET_ERR_TIMEDOUT	(DCPU_RET_ERROR_BIT | BIT(4))

/* Firmware magic (first word of the image; identifies its endianness) */
#define DPFE_BE_MAGIC		0xfe1010fe
#define DPFE_LE_MAGIC		0xfe0101fe

/* Error codes returned by __verify_firmware() */
#define ERR_INVALID_MAGIC	-1
#define ERR_INVALID_SIZE	-2
#define ERR_INVALID_CHKSUM	-3

/* Message types (MSG_HEADER field values) */
#define DPFE_MSG_TYPE_COMMAND	1
#define DPFE_MSG_TYPE_RESPONSE	2

/* Max iterations (at ~1 ms each) to wait for the DCPU mailbox */
#define DELAY_LOOP_MAX		1000
/* Word indices within a mailbox message (each field is one u32) */
enum dpfe_msg_fields {
	MSG_HEADER,		/* message type: command or response */
	MSG_COMMAND,		/* command code */
	MSG_ARG_COUNT,		/* number of arguments that follow */
	MSG_ARG0,		/* first argument */
	MSG_FIELD_MAX = 16	/* Max number of arguments */
};

/* Logical commands; used as indices into dpfe_api->command[] */
enum dpfe_commands {
	DPFE_CMD_GET_INFO,
	DPFE_CMD_GET_REFRESH,
	DPFE_CMD_GET_VENDOR,
	DPFE_CMD_MAX		/* Last entry */
};
/*
 * Format of the binary firmware file:
 *
 *   entry
 *      0    header
 *           value:  0xfe0101fe  <== little endian
 *                   0xfe1010fe  <== big endian
 *      1    sequence:
 *           [31:16] total segments on this build
 *           [15:0]  this segment sequence.
 *      2    FW version
 *      3    IMEM byte size
 *      4    DMEM byte size
 *           IMEM
 *           DMEM
 *      last checksum ==> sum of everything
 */
struct dpfe_firmware_header {
	u32 magic;	/* DPFE_BE_MAGIC or DPFE_LE_MAGIC */
	u32 sequence;
	u32 version;
	u32 imem_size;	/* instruction section size, in bytes */
	u32 dmem_size;	/* data section size, in bytes */
};

/* Things we only need during initialization. */
struct init_data {
	unsigned int dmem_len;
	unsigned int imem_len;
	unsigned int chksum;	/* expected checksum from the firmware file */
	bool is_big_endian;	/* endianness of the firmware image */
};

/* API version and corresponding commands */
struct dpfe_api {
	int version;
	const char *fw_name;	/* NULL if boot firmware loads the image */
	const struct attribute_group **sysfs_attrs;
	u32 command[DPFE_CMD_MAX][MSG_FIELD_MAX];
};

/* Things we need for as long as we are active. */
struct brcmstb_dpfe_priv {
	void __iomem *regs;	/* DCPU control registers + message RAM */
	void __iomem *dmem;	/* DCPU data memory */
	void __iomem *imem;	/* DCPU instruction memory */
	struct device *dev;
	const struct dpfe_api *dpfe_api;
	struct mutex lock;	/* serializes register/mailbox access */
};
/*
 * Forward declaration of our sysfs attribute functions, so we can declare the
 * attribute data structures early.
 */
static ssize_t show_info(struct device *, struct device_attribute *, char *);
static ssize_t show_refresh(struct device *, struct device_attribute *, char *);
static ssize_t store_refresh(struct device *, struct device_attribute *,
			     const char *, size_t);
static ssize_t show_vendor(struct device *, struct device_attribute *, char *);
static ssize_t show_dram(struct device *, struct device_attribute *, char *);

/*
 * Declare our attributes early, so they can be referenced in the API data
 * structure. We need to do this, because the attributes depend on the API
 * version.
 */
static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
static DEVICE_ATTR(dpfe_dram, 0444, show_dram, NULL);

/* API v2 sysfs attributes */
static struct attribute *dpfe_v2_attrs[] = {
	&dev_attr_dpfe_info.attr,
	&dev_attr_dpfe_refresh.attr,
	&dev_attr_dpfe_vendor.attr,
	NULL
};
ATTRIBUTE_GROUPS(dpfe_v2);

/* API v3 sysfs attributes (no refresh/vendor files; dram replaces them) */
static struct attribute *dpfe_v3_attrs[] = {
	&dev_attr_dpfe_info.attr,
	&dev_attr_dpfe_dram.attr,
	NULL
};
ATTRIBUTE_GROUPS(dpfe_v3);
/*
 * Old API v2 firmware commands, as defined in the rev 0.61 specification, we
 * use a version set to 1 to denote that it is not compatible with the new API
 * v2 and onwards.
 */
static const struct dpfe_api dpfe_api_old_v2 = {
	.version = 1,
	.fw_name = "dpfe.bin",
	.sysfs_attrs = dpfe_v2_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 1,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 2,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_VENDOR] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 2,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 2,
		},
	}
};
/*
 * API v2 firmware commands, as defined in the rev 0.8 specification, named new
 * v2 here
 */
static const struct dpfe_api dpfe_api_new_v2 = {
	.version = 2,
	.fw_name = NULL, /* We expect the firmware to have been downloaded! */
	.sysfs_attrs = dpfe_v2_groups,
	.command = {
		/* New v2 commands take no arguments (MSG_ARG_COUNT is 0). */
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x101,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x201,
		},
		[DPFE_CMD_GET_VENDOR] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x202,
		},
	}
};
/* API v3 firmware commands */
static const struct dpfe_api dpfe_api_v3 = {
	.version = 3,
	.fw_name = NULL, /* We expect the firmware to have been downloaded! */
	.sysfs_attrs = dpfe_v3_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x0101,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x0202,
			[MSG_ARG_COUNT] = 0,
		},
		/* There's no GET_VENDOR command in API v3. */
	},
};
  273. static const char *get_error_text(unsigned int i)
  274. {
  275. static const char * const error_text[] = {
  276. "Success", "Header code incorrect",
  277. "Unknown command or argument", "Incorrect checksum",
  278. "Malformed command", "Timed out", "Unknown error",
  279. };
  280. if (unlikely(i >= ARRAY_SIZE(error_text)))
  281. i = ARRAY_SIZE(error_text) - 1;
  282. return error_text[i];
  283. }
/*
 * Return true if the DCPU is running, i.e. its reset bit is clear.
 * The register read is serialized against other mailbox/reset accesses.
 */
static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv)
{
	u32 val;

	mutex_lock(&priv->lock);
	val = readl_relaxed(priv->regs + REG_DCPU_RESET);
	mutex_unlock(&priv->lock);

	return !(val & DCPU_RESET_MASK);
}
/* Stop the DCPU by asserting its reset bit (no-op if already in reset). */
static void __disable_dcpu(struct brcmstb_dpfe_priv *priv)
{
	u32 val;

	if (!is_dcpu_enabled(priv))
		return;

	mutex_lock(&priv->lock);

	/* Put DCPU in reset if it's running. */
	val = readl_relaxed(priv->regs + REG_DCPU_RESET);
	val |= (1 << DCPU_RESET_SHIFT);
	writel_relaxed(val, priv->regs + REG_DCPU_RESET);

	mutex_unlock(&priv->lock);
}
/*
 * Start the DCPU: clear both mailboxes, enable its clock, then release it
 * from reset — in that order, so it comes up with a clean mailbox state.
 */
static void __enable_dcpu(struct brcmstb_dpfe_priv *priv)
{
	void __iomem *regs = priv->regs;
	u32 val;

	mutex_lock(&priv->lock);

	/* Clear mailbox registers. */
	writel_relaxed(0, regs + REG_TO_DCPU_MBOX);
	writel_relaxed(0, regs + REG_TO_HOST_MBOX);

	/* Disable DCPU clock gating */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val &= ~(1 << DCPU_CLK_DISABLE_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);

	/* Take DCPU out of reset */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val &= ~(1 << DCPU_RESET_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);

	mutex_unlock(&priv->lock);
}
  322. static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
  323. {
  324. unsigned int sum = 0;
  325. unsigned int i;
  326. /* Don't include the last field in the checksum. */
  327. for (i = 0; i < max; i++)
  328. sum += msg[i];
  329. return sum;
  330. }
  331. static void __iomem *get_msg_ptr(struct brcmstb_dpfe_priv *priv, u32 response,
  332. char *buf, ssize_t *size)
  333. {
  334. unsigned int msg_type;
  335. unsigned int offset;
  336. void __iomem *ptr = NULL;
  337. /* There is no need to use this function for API v3 or later. */
  338. if (unlikely(priv->dpfe_api->version >= 3))
  339. return NULL;
  340. msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
  341. offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
  342. /*
  343. * msg_type == 1: the offset is relative to the message RAM
  344. * msg_type == 0: the offset is relative to the data RAM (this is the
  345. * previous way of passing data)
  346. * msg_type is anything else: there's critical hardware problem
  347. */
  348. switch (msg_type) {
  349. case 1:
  350. ptr = priv->regs + DCPU_MSG_RAM_START + offset;
  351. break;
  352. case 0:
  353. ptr = priv->dmem + offset;
  354. break;
  355. default:
  356. dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n",
  357. response);
  358. if (buf && size)
  359. *size = sprintf(buf,
  360. "FATAL: communication error with DCPU\n");
  361. }
  362. return ptr;
  363. }
  364. static void __finalize_command(struct brcmstb_dpfe_priv *priv)
  365. {
  366. unsigned int release_mbox;
  367. /*
  368. * It depends on the API version which MBOX register we have to write to
  369. * signal we are done.
  370. */
  371. release_mbox = (priv->dpfe_api->version < 2)
  372. ? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX;
  373. writel_relaxed(0, priv->regs + release_mbox);
  374. }
  375. static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
  376. u32 result[])
  377. {
  378. void __iomem *regs = priv->regs;
  379. unsigned int i, chksum, chksum_idx;
  380. const u32 *msg;
  381. int ret = 0;
  382. u32 resp;
  383. if (cmd >= DPFE_CMD_MAX)
  384. return -1;
  385. msg = priv->dpfe_api->command[cmd];
  386. mutex_lock(&priv->lock);
  387. /* Wait for DCPU to become ready */
  388. for (i = 0; i < DELAY_LOOP_MAX; i++) {
  389. resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
  390. if (resp == 0)
  391. break;
  392. msleep(1);
  393. }
  394. if (resp != 0) {
  395. mutex_unlock(&priv->lock);
  396. return -ffs(DCPU_RET_ERR_TIMEDOUT);
  397. }
  398. /* Compute checksum over the message */
  399. chksum_idx = msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
  400. chksum = get_msg_chksum(msg, chksum_idx);
  401. /* Write command and arguments to message area */
  402. for (i = 0; i < MSG_FIELD_MAX; i++) {
  403. if (i == chksum_idx)
  404. writel_relaxed(chksum, regs + DCPU_MSG_RAM(i));
  405. else
  406. writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
  407. }
  408. /* Tell DCPU there is a command waiting */
  409. writel_relaxed(1, regs + REG_TO_DCPU_MBOX);
  410. /* Wait for DCPU to process the command */
  411. for (i = 0; i < DELAY_LOOP_MAX; i++) {
  412. /* Read response code */
  413. resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
  414. if (resp > 0)
  415. break;
  416. msleep(1);
  417. }
  418. if (i == DELAY_LOOP_MAX) {
  419. resp = (DCPU_RET_ERR_TIMEDOUT & ~DCPU_RET_ERROR_BIT);
  420. ret = -ffs(resp);
  421. } else {
  422. /* Read response data */
  423. for (i = 0; i < MSG_FIELD_MAX; i++)
  424. result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i));
  425. chksum_idx = result[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
  426. }
  427. /* Tell DCPU we are done */
  428. __finalize_command(priv);
  429. mutex_unlock(&priv->lock);
  430. if (ret)
  431. return ret;
  432. /* Verify response */
  433. chksum = get_msg_chksum(result, chksum_idx);
  434. if (chksum != result[chksum_idx])
  435. resp = DCPU_RET_ERR_CHKSUM;
  436. if (resp != DCPU_RET_SUCCESS) {
  437. resp &= ~DCPU_RET_ERROR_BIT;
  438. ret = -ffs(resp);
  439. }
  440. return ret;
  441. }
  442. /* Ensure that the firmware file loaded meets all the requirements. */
  443. static int __verify_firmware(struct init_data *init,
  444. const struct firmware *fw)
  445. {
  446. const struct dpfe_firmware_header *header = (void *)fw->data;
  447. unsigned int dmem_size, imem_size, total_size;
  448. bool is_big_endian = false;
  449. const u32 *chksum_ptr;
  450. if (header->magic == DPFE_BE_MAGIC)
  451. is_big_endian = true;
  452. else if (header->magic != DPFE_LE_MAGIC)
  453. return ERR_INVALID_MAGIC;
  454. if (is_big_endian) {
  455. dmem_size = be32_to_cpu(header->dmem_size);
  456. imem_size = be32_to_cpu(header->imem_size);
  457. } else {
  458. dmem_size = le32_to_cpu(header->dmem_size);
  459. imem_size = le32_to_cpu(header->imem_size);
  460. }
  461. /* Data and instruction sections are 32 bit words. */
  462. if ((dmem_size % sizeof(u32)) != 0 || (imem_size % sizeof(u32)) != 0)
  463. return ERR_INVALID_SIZE;
  464. /*
  465. * The header + the data section + the instruction section + the
  466. * checksum must be equal to the total firmware size.
  467. */
  468. total_size = dmem_size + imem_size + sizeof(*header) +
  469. sizeof(*chksum_ptr);
  470. if (total_size != fw->size)
  471. return ERR_INVALID_SIZE;
  472. /* The checksum comes at the very end. */
  473. chksum_ptr = (void *)fw->data + sizeof(*header) + dmem_size + imem_size;
  474. init->is_big_endian = is_big_endian;
  475. init->dmem_len = dmem_size;
  476. init->imem_len = imem_size;
  477. init->chksum = (is_big_endian)
  478. ? be32_to_cpu(*chksum_ptr) : le32_to_cpu(*chksum_ptr);
  479. return 0;
  480. }
/* Verify checksum by reading back the firmware from co-processor RAM. */
static int __verify_fw_checksum(struct init_data *init,
				struct brcmstb_dpfe_priv *priv,
				const struct dpfe_firmware_header *header,
				u32 checksum)
{
	u32 magic, sequence, version, sum;
	u32 __iomem *dmem = priv->dmem;
	u32 __iomem *imem = priv->imem;
	unsigned int i;

	/* Header fields are stored in the endianness of the firmware image. */
	if (init->is_big_endian) {
		magic = be32_to_cpu(header->magic);
		sequence = be32_to_cpu(header->sequence);
		version = be32_to_cpu(header->version);
	} else {
		magic = le32_to_cpu(header->magic);
		sequence = le32_to_cpu(header->sequence);
		version = le32_to_cpu(header->version);
	}

	/*
	 * The checksum is the sum of the header words plus every 32-bit word
	 * of both memory sections, as read back from the DCPU RAM.
	 */
	sum = magic + sequence + version + init->dmem_len + init->imem_len;

	for (i = 0; i < init->dmem_len / sizeof(u32); i++)
		sum += readl_relaxed(dmem + i);

	for (i = 0; i < init->imem_len / sizeof(u32); i++)
		sum += readl_relaxed(imem + i);

	/* 0 on match, -1 on mismatch */
	return (sum == checksum) ? 0 : -1;
}
  507. static int __write_firmware(u32 __iomem *mem, const u32 *fw,
  508. unsigned int size, bool is_big_endian)
  509. {
  510. unsigned int i;
  511. /* Convert size to 32-bit words. */
  512. size /= sizeof(u32);
  513. /* It is recommended to clear the firmware area first. */
  514. for (i = 0; i < size; i++)
  515. writel_relaxed(0, mem + i);
  516. /* Now copy it. */
  517. if (is_big_endian) {
  518. for (i = 0; i < size; i++)
  519. writel_relaxed(be32_to_cpu(fw[i]), mem + i);
  520. } else {
  521. for (i = 0; i < size; i++)
  522. writel_relaxed(le32_to_cpu(fw[i]), mem + i);
  523. }
  524. return 0;
  525. }
/*
 * Load the DCPU firmware: verify the image, stop the DCPU, write DMEM and
 * IMEM, verify the copy by checksum, then restart the DCPU. Skipped entirely
 * if the DCPU is already running and answering commands.
 *
 * Returns 0 on success, -EPROBE_DEFER if the firmware file is not yet
 * available, or another negative errno on failure.
 */
static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
{
	const struct dpfe_firmware_header *header;
	unsigned int dmem_size, imem_size;
	struct device *dev = priv->dev;
	bool is_big_endian = false;
	const struct firmware *fw;
	const u32 *dmem, *imem;
	struct init_data init;
	const void *fw_blob;
	int ret;

	/*
	 * Skip downloading the firmware if the DCPU is already running and
	 * responding to commands.
	 */
	if (is_dcpu_enabled(priv)) {
		u32 response[MSG_FIELD_MAX];

		ret = __send_command(priv, DPFE_CMD_GET_INFO, response);
		if (!ret)
			return 0;
	}

	/*
	 * If the firmware filename is NULL it means the boot firmware has to
	 * download the DCPU firmware for us. If that didn't work, we have to
	 * bail, since downloading it ourselves wouldn't work either.
	 */
	if (!priv->dpfe_api->fw_name)
		return -ENODEV;

	ret = firmware_request_nowarn(&fw, priv->dpfe_api->fw_name, dev);
	/*
	 * Defer the firmware download if the firmware file couldn't be found.
	 * The root file system may not be available yet.
	 */
	if (ret)
		return (ret == -ENOENT) ? -EPROBE_DEFER : ret;

	ret = __verify_firmware(&init, fw);
	if (ret) {
		ret = -EFAULT;
		goto release_fw;
	}

	/* The DCPU must be held in reset while its RAM is rewritten. */
	__disable_dcpu(priv);

	is_big_endian = init.is_big_endian;
	dmem_size = init.dmem_len;
	imem_size = init.imem_len;

	/* At the beginning of the firmware blob is a header. */
	header = (struct dpfe_firmware_header *)fw->data;
	/* Void pointer to the beginning of the actual firmware. */
	fw_blob = fw->data + sizeof(*header);
	/* IMEM comes right after the header. */
	imem = fw_blob;
	/* DMEM follows after IMEM. */
	dmem = fw_blob + imem_size;

	ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian);
	if (ret)
		goto release_fw;
	ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian);
	if (ret)
		goto release_fw;

	/* Read the copy back from DCPU RAM to catch transfer corruption. */
	ret = __verify_fw_checksum(&init, priv, header, init.chksum);
	if (ret)
		goto release_fw;

	__enable_dcpu(priv);

release_fw:
	release_firmware(fw);
	return ret;
}
  592. static ssize_t generic_show(unsigned int command, u32 response[],
  593. struct brcmstb_dpfe_priv *priv, char *buf)
  594. {
  595. int ret;
  596. if (!priv)
  597. return sprintf(buf, "ERROR: driver private data not set\n");
  598. ret = __send_command(priv, command, response);
  599. if (ret < 0)
  600. return sprintf(buf, "ERROR: %s\n", get_error_text(-ret));
  601. return 0;
  602. }
  603. static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
  604. char *buf)
  605. {
  606. u32 response[MSG_FIELD_MAX];
  607. struct brcmstb_dpfe_priv *priv;
  608. unsigned int info;
  609. ssize_t ret;
  610. priv = dev_get_drvdata(dev);
  611. ret = generic_show(DPFE_CMD_GET_INFO, response, priv, buf);
  612. if (ret)
  613. return ret;
  614. info = response[MSG_ARG0];
  615. return sprintf(buf, "%u.%u.%u.%u\n",
  616. (info >> 24) & 0xff,
  617. (info >> 16) & 0xff,
  618. (info >> 8) & 0xff,
  619. info & 0xff);
  620. }
/*
 * sysfs 'dpfe_refresh' (API v1/v2): print the refresh interval, the decoded
 * MR4 bit fields, and the error word. The MR4 value lives in byte 3 of the
 * info word (see DRAM_INFO_MR4_SHIFT).
 */
static ssize_t show_refresh(struct device *dev,
			    struct device_attribute *devattr, char *buf)
{
	u32 response[MSG_FIELD_MAX];
	void __iomem *info;
	struct brcmstb_dpfe_priv *priv;
	u8 refresh, sr_abort, ppre, thermal_offs, tuf;
	u32 mr4;
	ssize_t ret;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
	if (ret)
		return ret;

	/* The response carries an offset to the actual data (see get_msg_ptr). */
	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
	if (!info)
		return ret;

	mr4 = (readl_relaxed(info + DRAM_INFO_MR4) >> DRAM_INFO_MR4_SHIFT) &
	       DRAM_INFO_MR4_MASK;

	/* Split MR4 into its individual bit fields. */
	refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK;
	sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK;
	ppre = (mr4 >> DRAM_MR4_PPRE) & DRAM_MR4_PPRE_MASK;
	thermal_offs = (mr4 >> DRAM_MR4_TH_OFFS) & DRAM_MR4_TH_OFFS_MASK;
	tuf = (mr4 >> DRAM_MR4_TUF) & DRAM_MR4_TUF_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x %#x %#x\n",
		       readl_relaxed(info + DRAM_INFO_INTERVAL),
		       refresh, sr_abort, ppre, thermal_offs, tuf,
		       readl_relaxed(info + DRAM_INFO_ERROR));
}
  649. static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
  650. const char *buf, size_t count)
  651. {
  652. u32 response[MSG_FIELD_MAX];
  653. struct brcmstb_dpfe_priv *priv;
  654. void __iomem *info;
  655. unsigned long val;
  656. int ret;
  657. if (kstrtoul(buf, 0, &val) < 0)
  658. return -EINVAL;
  659. priv = dev_get_drvdata(dev);
  660. ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response);
  661. if (ret)
  662. return ret;
  663. info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL);
  664. if (!info)
  665. return -EIO;
  666. writel_relaxed(val, info + DRAM_INFO_INTERVAL);
  667. return count;
  668. }
/*
 * sysfs 'dpfe_vendor' (API v1/v2): print mode registers MR5-MR8 (each taken
 * from byte 3 of its word) and the vendor error word.
 */
static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
			   char *buf)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	void __iomem *info;
	ssize_t ret;
	u32 mr5, mr6, mr7, mr8, err;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_VENDOR, response, priv, buf);
	if (ret)
		return ret;

	/* The response carries an offset to the actual data (see get_msg_ptr). */
	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
	if (!info)
		return ret;

	mr5 = (readl_relaxed(info + DRAM_VENDOR_MR5) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	mr6 = (readl_relaxed(info + DRAM_VENDOR_MR6) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	mr7 = (readl_relaxed(info + DRAM_VENDOR_MR7) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	mr8 = (readl_relaxed(info + DRAM_VENDOR_MR8) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	err = readl_relaxed(info + DRAM_VENDOR_ERROR) & DRAM_VENDOR_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x\n", mr5, mr6, mr7, mr8, err);
}
  695. static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
  696. char *buf)
  697. {
  698. u32 response[MSG_FIELD_MAX];
  699. struct brcmstb_dpfe_priv *priv;
  700. ssize_t ret;
  701. u32 mr4, mr5, mr6, mr7, mr8, err;
  702. priv = dev_get_drvdata(dev);
  703. ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
  704. if (ret)
  705. return ret;
  706. mr4 = response[MSG_ARG0 + 0] & DRAM_INFO_MR4_MASK;
  707. mr5 = response[MSG_ARG0 + 1] & DRAM_DDR_INFO_MASK;
  708. mr6 = response[MSG_ARG0 + 2] & DRAM_DDR_INFO_MASK;
  709. mr7 = response[MSG_ARG0 + 3] & DRAM_DDR_INFO_MASK;
  710. mr8 = response[MSG_ARG0 + 4] & DRAM_DDR_INFO_MASK;
  711. err = response[MSG_ARG0 + 5] & DRAM_DDR_INFO_MASK;
  712. return sprintf(buf, "%#x %#x %#x %#x %#x %#x\n", mr4, mr5, mr6, mr7,
  713. mr8, err);
  714. }
/*
 * Re-download the firmware on resume; the download is skipped internally if
 * the DCPU is still running and responding to commands.
 */
static int brcmstb_dpfe_resume(struct platform_device *pdev)
{
	struct brcmstb_dpfe_priv *priv = platform_get_drvdata(pdev);

	return brcmstb_dpfe_download_firmware(priv);
}
/*
 * Probe: map the three DT-described regions ("dpfe-cpu" registers,
 * "dpfe-dmem", "dpfe-imem"), pick the API table from the compatible match,
 * download the firmware if needed, and create the sysfs attribute group.
 */
static int brcmstb_dpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct brcmstb_dpfe_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	mutex_init(&priv->lock);
	platform_set_drvdata(pdev, priv);

	priv->regs = devm_platform_ioremap_resource_byname(pdev, "dpfe-cpu");
	if (IS_ERR(priv->regs)) {
		dev_err(dev, "couldn't map DCPU registers\n");
		return -ENODEV;
	}

	priv->dmem = devm_platform_ioremap_resource_byname(pdev, "dpfe-dmem");
	if (IS_ERR(priv->dmem)) {
		dev_err(dev, "Couldn't map DCPU data memory\n");
		return -ENOENT;
	}

	priv->imem = devm_platform_ioremap_resource_byname(pdev, "dpfe-imem");
	if (IS_ERR(priv->imem)) {
		dev_err(dev, "Couldn't map DCPU instruction memory\n");
		return -ENOENT;
	}

	priv->dpfe_api = of_device_get_match_data(dev);
	if (unlikely(!priv->dpfe_api)) {
		/*
		 * It should be impossible to end up here, but to be safe we
		 * check anyway.
		 */
		dev_err(dev, "Couldn't determine API\n");
		return -ENOENT;
	}

	/* May return -EPROBE_DEFER if the firmware file isn't available yet. */
	ret = brcmstb_dpfe_download_firmware(priv);
	if (ret)
		return dev_err_probe(dev, ret, "Couldn't download firmware\n");

	ret = sysfs_create_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
	if (!ret)
		dev_info(dev, "registered with API v%d.\n",
			 priv->dpfe_api->version);

	return ret;
}
/* Remove: tear down the sysfs attributes (all other resources are devm). */
static void brcmstb_dpfe_remove(struct platform_device *pdev)
{
	struct brcmstb_dpfe_priv *priv = dev_get_drvdata(&pdev->dev);

	sysfs_remove_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
}
/* DT compatibles; .data selects the firmware API table for the chip. */
static const struct of_device_id brcmstb_dpfe_of_match[] = {
	/* Use legacy API v2 for a select number of chips */
	{ .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_new_v2 },
	/* API v3 is the default going forward */
	{ .compatible = "brcm,dpfe-cpu", .data = &dpfe_api_v3 },
	{}
};
MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match);
static struct platform_driver brcmstb_dpfe_driver = {
	.driver	= {
		.name = DRVNAME,
		.of_match_table = brcmstb_dpfe_of_match,
	},
	.probe = brcmstb_dpfe_probe,
	.remove_new = brcmstb_dpfe_remove,
	/* Legacy PM callback: reload firmware after system resume. */
	.resume = brcmstb_dpfe_resume,
};

module_platform_driver(brcmstb_dpfe_driver);

MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
MODULE_DESCRIPTION("BRCMSTB DDR PHY Front End Driver");
MODULE_LICENSE("GPL");