brcmstb_dpfe.c

/*
 * DDR PHY Front End (DPFE) driver for Broadcom set top box SoCs
 *
 * Copyright (c) 2017 Broadcom
 *
 * Released under the GPLv2 only.
 * SPDX-License-Identifier: GPL-2.0
 */

/*
 * This driver provides access to the DPFE interface of Broadcom STB SoCs.
 * The firmware running on the DCPU inside the DDR PHY can provide current
 * information about the system's RAM, for instance the DRAM refresh rate.
 * This can be used as an indirect indicator for the DRAM's temperature.
 * Slower refresh rate means cooler RAM, higher refresh rate means hotter
 * RAM.
 *
 * Throughout the driver, we use readl_relaxed() and writel_relaxed(), which
 * already contain the appropriate le32_to_cpu()/cpu_to_le32() calls.
 *
 * Note regarding the loading of the firmware image: we use be32_to_cpu()
 * and le32_to_cpu(), so we can support the following four cases:
 * - LE kernel + LE firmware image (the most common case)
 * - LE kernel + BE firmware image
 * - BE kernel + LE firmware image
 * - BE kernel + BE firmware image
 *
 * The DCPU always runs in big endian mode. The firmware image, however, can
 * be in either format. Also, communication between host CPU and DCPU is
 * always in little endian.
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

#define DRVNAME			"brcmstb-dpfe"
#define FIRMWARE_NAME		"dpfe.bin"

/* DCPU register offsets */
#define REG_DCPU_RESET		0x0
#define REG_TO_DCPU_MBOX	0x10
#define REG_TO_HOST_MBOX	0x14

/* Macros to process offsets returned by the DCPU */
#define DRAM_MSG_ADDR_OFFSET	0x0
#define DRAM_MSG_TYPE_OFFSET	0x1c
#define DRAM_MSG_ADDR_MASK	((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
#define DRAM_MSG_TYPE_MASK	((1UL << \
				 (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1)

/* Message RAM */
#define DCPU_MSG_RAM_START	0x100
#define DCPU_MSG_RAM(x)		(DCPU_MSG_RAM_START + (x) * sizeof(u32))

/* DRAM Info Offsets & Masks */
#define DRAM_INFO_INTERVAL	0x0
#define DRAM_INFO_MR4		0x4
#define DRAM_INFO_ERROR		0x8
#define DRAM_INFO_MR4_MASK	0xff

/* DRAM MR4 Offsets & Masks */
#define DRAM_MR4_REFRESH	0x0	/* Refresh rate */
#define DRAM_MR4_SR_ABORT	0x3	/* Self Refresh Abort */
#define DRAM_MR4_PPRE		0x4	/* Post-package repair entry/exit */
#define DRAM_MR4_TH_OFFS	0x5	/* Thermal Offset; vendor specific */
#define DRAM_MR4_TUF		0x7	/* Temperature Update Flag */

#define DRAM_MR4_REFRESH_MASK	0x7
#define DRAM_MR4_SR_ABORT_MASK	0x1
#define DRAM_MR4_PPRE_MASK	0x1
#define DRAM_MR4_TH_OFFS_MASK	0x3
#define DRAM_MR4_TUF_MASK	0x1

/* DRAM Vendor Offsets & Masks */
#define DRAM_VENDOR_MR5		0x0
#define DRAM_VENDOR_MR6		0x4
#define DRAM_VENDOR_MR7		0x8
#define DRAM_VENDOR_MR8		0xc
#define DRAM_VENDOR_ERROR	0x10
#define DRAM_VENDOR_MASK	0xff

/* Reset register bits & masks */
#define DCPU_RESET_SHIFT	0x0
#define DCPU_RESET_MASK		0x1
#define DCPU_CLK_DISABLE_SHIFT	0x2

/* DCPU return codes */
#define DCPU_RET_ERROR_BIT	BIT(31)
#define DCPU_RET_SUCCESS	0x1
#define DCPU_RET_ERR_HEADER	(DCPU_RET_ERROR_BIT | BIT(0))
#define DCPU_RET_ERR_INVAL	(DCPU_RET_ERROR_BIT | BIT(1))
#define DCPU_RET_ERR_CHKSUM	(DCPU_RET_ERROR_BIT | BIT(2))
#define DCPU_RET_ERR_COMMAND	(DCPU_RET_ERROR_BIT | BIT(3))
/* This error code is not firmware defined and only used in the driver. */
#define DCPU_RET_ERR_TIMEDOUT	(DCPU_RET_ERROR_BIT | BIT(4))

/* Firmware magic */
#define DPFE_BE_MAGIC		0xfe1010fe
#define DPFE_LE_MAGIC		0xfe0101fe

/* Error codes */
#define ERR_INVALID_MAGIC	-1
#define ERR_INVALID_SIZE	-2
#define ERR_INVALID_CHKSUM	-3

/* Message types */
#define DPFE_MSG_TYPE_COMMAND	1
#define DPFE_MSG_TYPE_RESPONSE	2

#define DELAY_LOOP_MAX		200000

enum dpfe_msg_fields {
	MSG_HEADER,
	MSG_COMMAND,
	MSG_ARG_COUNT,
	MSG_ARG0,
	MSG_CHKSUM,
	MSG_FIELD_MAX	/* Last entry */
};

enum dpfe_commands {
	DPFE_CMD_GET_INFO,
	DPFE_CMD_GET_REFRESH,
	DPFE_CMD_GET_VENDOR,
	DPFE_CMD_MAX	/* Last entry */
};

struct dpfe_msg {
	u32 header;
	u32 command;
	u32 arg_count;
	u32 arg0;
	u32 chksum;	/* This is the sum of all other entries. */
};

/*
 * Format of the binary firmware file:
 *
 *   entry
 *      0    header
 *           value:  0xfe0101fe  <== little endian
 *                   0xfe1010fe  <== big endian
 *      1    sequence:
 *           [31:16] total segments on this build
 *           [15:0]  this segment sequence.
 *      2    FW version
 *      3    IMEM byte size
 *      4    DMEM byte size
 *           IMEM
 *           DMEM
 *      last checksum ==> sum of everything
 */
struct dpfe_firmware_header {
	u32 magic;
	u32 sequence;
	u32 version;
	u32 imem_size;
	u32 dmem_size;
};
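
/*
 * The overall file layout implied by __verify_firmware() and
 * __verify_fw_checksum() below: the file size must equal
 * sizeof(struct dpfe_firmware_header) + imem_size + dmem_size plus four
 * bytes for the trailing checksum, and the checksum itself is the 32-bit
 * sum of the five header words and every IMEM and DMEM word.
 */
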
/* Things we only need during initialization. */
struct init_data {
	unsigned int dmem_len;
	unsigned int imem_len;
	unsigned int chksum;
	bool is_big_endian;
};

/* Things we need for as long as we are active. */
struct private_data {
	void __iomem *regs;
	void __iomem *dmem;
	void __iomem *imem;
	struct device *dev;
	struct mutex lock;
};

static const char *error_text[] = {
	"Success", "Header code incorrect", "Unknown command or argument",
	"Incorrect checksum", "Malformed command", "Timed out",
};

/* List of supported firmware commands */
static const u32 dpfe_commands[DPFE_CMD_MAX][MSG_FIELD_MAX] = {
	[DPFE_CMD_GET_INFO] = {
		[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
		[MSG_COMMAND] = 1,
		[MSG_ARG_COUNT] = 1,
		[MSG_ARG0] = 1,
		[MSG_CHKSUM] = 4,
	},
	[DPFE_CMD_GET_REFRESH] = {
		[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
		[MSG_COMMAND] = 2,
		[MSG_ARG_COUNT] = 1,
		[MSG_ARG0] = 1,
		[MSG_CHKSUM] = 5,
	},
	[DPFE_CMD_GET_VENDOR] = {
		[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
		[MSG_COMMAND] = 2,
		[MSG_ARG_COUNT] = 1,
		[MSG_ARG0] = 2,
		[MSG_CHKSUM] = 6,
	},
};
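
/*
 * In each command above, MSG_CHKSUM is simply the sum of the other four
 * message words (see get_msg_chksum() below): 1+1+1+1 = 4 for GET_INFO,
 * 1+2+1+1 = 5 for GET_REFRESH and 1+2+1+2 = 6 for GET_VENDOR.
 */
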
static bool is_dcpu_enabled(void __iomem *regs)
{
	u32 val;

	val = readl_relaxed(regs + REG_DCPU_RESET);

	return !(val & DCPU_RESET_MASK);
}

static void __disable_dcpu(void __iomem *regs)
{
	u32 val;

	if (!is_dcpu_enabled(regs))
		return;

	/* Put DCPU in reset if it's running. */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val |= (1 << DCPU_RESET_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);
}

static void __enable_dcpu(void __iomem *regs)
{
	u32 val;

	/* Clear mailbox registers. */
	writel_relaxed(0, regs + REG_TO_DCPU_MBOX);
	writel_relaxed(0, regs + REG_TO_HOST_MBOX);

	/* Disable DCPU clock gating */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val &= ~(1 << DCPU_CLK_DISABLE_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);

	/* Take DCPU out of reset */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val &= ~(1 << DCPU_RESET_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);
}

static unsigned int get_msg_chksum(const u32 msg[])
{
	unsigned int sum = 0;
	unsigned int i;

	/* Don't include the last field in the checksum. */
	for (i = 0; i < MSG_FIELD_MAX - 1; i++)
		sum += msg[i];

	return sum;
}
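
/*
 * A DCPU response word, as decoded by get_msg_ptr() below, packs the message
 * type into bits [31:28] (DRAM_MSG_TYPE_OFFSET) and the data offset into
 * bits [27:0] (DRAM_MSG_ADDR_MASK).
 */
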
static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
				 char *buf, ssize_t *size)
{
	unsigned int msg_type;
	unsigned int offset;
	void __iomem *ptr = NULL;

	msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
	offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;

	/*
	 * msg_type == 1: the offset is relative to the message RAM
	 * msg_type == 0: the offset is relative to the data RAM (this is the
	 *                previous way of passing data)
	 * msg_type is anything else: there's a critical hardware problem
	 */
	switch (msg_type) {
	case 1:
		ptr = priv->regs + DCPU_MSG_RAM_START + offset;
		break;
	case 0:
		ptr = priv->dmem + offset;
		break;
	default:
		dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n",
			  response);
		if (buf && size)
			*size = sprintf(buf,
				"FATAL: communication error with DCPU\n");
	}

	return ptr;
}
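
/*
 * Command/response handshake with the DCPU, as implemented below: the host
 * writes the five message words into message RAM, writes 1 to
 * REG_TO_DCPU_MBOX to signal a pending command, then polls REG_TO_HOST_MBOX
 * (up to DELAY_LOOP_MAX iterations of 5 us each) for the response code. The
 * reply placed in message RAM is only trusted if its checksum matches and
 * the mailbox code is DCPU_RET_SUCCESS.
 */
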
static int __send_command(struct private_data *priv, unsigned int cmd,
			  u32 result[])
{
	const u32 *msg = dpfe_commands[cmd];
	void __iomem *regs = priv->regs;
	unsigned int i, chksum;
	int ret = 0;
	u32 resp;

	if (cmd >= DPFE_CMD_MAX)
		return -1;

	mutex_lock(&priv->lock);

	/* Write command and arguments to message area */
	for (i = 0; i < MSG_FIELD_MAX; i++)
		writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));

	/* Tell DCPU there is a command waiting */
	writel_relaxed(1, regs + REG_TO_DCPU_MBOX);

	/* Wait for DCPU to process the command */
	for (i = 0; i < DELAY_LOOP_MAX; i++) {
		/* Read response code */
		resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
		if (resp > 0)
			break;
		udelay(5);
	}

	if (i == DELAY_LOOP_MAX) {
		resp = (DCPU_RET_ERR_TIMEDOUT & ~DCPU_RET_ERROR_BIT);
		ret = -ffs(resp);
	} else {
		/* Read response data */
		for (i = 0; i < MSG_FIELD_MAX; i++)
			result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i));
	}

	/* Tell DCPU we are done */
	writel_relaxed(0, regs + REG_TO_HOST_MBOX);

	mutex_unlock(&priv->lock);

	if (ret)
		return ret;

	/* Verify response */
	chksum = get_msg_chksum(result);
	if (chksum != result[MSG_CHKSUM])
		resp = DCPU_RET_ERR_CHKSUM;

	if (resp != DCPU_RET_SUCCESS) {
		resp &= ~DCPU_RET_ERROR_BIT;
		ret = -ffs(resp);
	}

	return ret;
}

/* Ensure that the firmware file loaded meets all the requirements. */
static int __verify_firmware(struct init_data *init,
			     const struct firmware *fw)
{
	const struct dpfe_firmware_header *header = (void *)fw->data;
	unsigned int dmem_size, imem_size, total_size;
	bool is_big_endian = false;
	const u32 *chksum_ptr;

	if (header->magic == DPFE_BE_MAGIC)
		is_big_endian = true;
	else if (header->magic != DPFE_LE_MAGIC)
		return ERR_INVALID_MAGIC;

	if (is_big_endian) {
		dmem_size = be32_to_cpu(header->dmem_size);
		imem_size = be32_to_cpu(header->imem_size);
	} else {
		dmem_size = le32_to_cpu(header->dmem_size);
		imem_size = le32_to_cpu(header->imem_size);
	}

	/* Data and instruction sections are 32 bit words. */
	if ((dmem_size % sizeof(u32)) != 0 || (imem_size % sizeof(u32)) != 0)
		return ERR_INVALID_SIZE;

	/*
	 * The header + the data section + the instruction section + the
	 * checksum must be equal to the total firmware size.
	 */
	total_size = dmem_size + imem_size + sizeof(*header) +
		sizeof(*chksum_ptr);
	if (total_size != fw->size)
		return ERR_INVALID_SIZE;

	/* The checksum comes at the very end. */
	chksum_ptr = (void *)fw->data + sizeof(*header) + dmem_size + imem_size;

	init->is_big_endian = is_big_endian;
	init->dmem_len = dmem_size;
	init->imem_len = imem_size;
	init->chksum = (is_big_endian)
		? be32_to_cpu(*chksum_ptr) : le32_to_cpu(*chksum_ptr);

	return 0;
}

/* Verify checksum by reading back the firmware from co-processor RAM. */
static int __verify_fw_checksum(struct init_data *init,
				struct private_data *priv,
				const struct dpfe_firmware_header *header,
				u32 checksum)
{
	u32 magic, sequence, version, sum;
	u32 __iomem *dmem = priv->dmem;
	u32 __iomem *imem = priv->imem;
	unsigned int i;

	if (init->is_big_endian) {
		magic = be32_to_cpu(header->magic);
		sequence = be32_to_cpu(header->sequence);
		version = be32_to_cpu(header->version);
	} else {
		magic = le32_to_cpu(header->magic);
		sequence = le32_to_cpu(header->sequence);
		version = le32_to_cpu(header->version);
	}

	sum = magic + sequence + version + init->dmem_len + init->imem_len;

	for (i = 0; i < init->dmem_len / sizeof(u32); i++)
		sum += readl_relaxed(dmem + i);

	for (i = 0; i < init->imem_len / sizeof(u32); i++)
		sum += readl_relaxed(imem + i);

	return (sum == checksum) ? 0 : -1;
}

static int __write_firmware(u32 __iomem *mem, const u32 *fw,
			    unsigned int size, bool is_big_endian)
{
	unsigned int i;

	/* Convert size to 32-bit words. */
	size /= sizeof(u32);

	/* It is recommended to clear the firmware area first. */
	for (i = 0; i < size; i++)
		writel_relaxed(0, mem + i);

	/* Now copy it. */
	if (is_big_endian) {
		for (i = 0; i < size; i++)
			writel_relaxed(be32_to_cpu(fw[i]), mem + i);
	} else {
		for (i = 0; i < size; i++)
			writel_relaxed(le32_to_cpu(fw[i]), mem + i);
	}

	return 0;
}
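
/*
 * Firmware download sequence, as implemented below: skip everything if the
 * DCPU is already running and answers a GET_INFO command; otherwise request
 * "dpfe.bin", validate the image, hold the DCPU in reset, copy IMEM and DMEM
 * into the co-processor RAMs, verify the checksum by reading the RAMs back,
 * and finally release the DCPU from reset.
 */
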
static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
					  struct init_data *init)
{
	const struct dpfe_firmware_header *header;
	unsigned int dmem_size, imem_size;
	struct device *dev = &pdev->dev;
	bool is_big_endian = false;
	struct private_data *priv;
	const struct firmware *fw;
	const u32 *dmem, *imem;
	const void *fw_blob;
	int ret;

	priv = platform_get_drvdata(pdev);

	/*
	 * Skip downloading the firmware if the DCPU is already running and
	 * responding to commands.
	 */
	if (is_dcpu_enabled(priv->regs)) {
		u32 response[MSG_FIELD_MAX];

		ret = __send_command(priv, DPFE_CMD_GET_INFO, response);
		if (!ret)
			return 0;
	}

	ret = request_firmware(&fw, FIRMWARE_NAME, dev);
	/* request_firmware() prints its own error messages. */
	if (ret)
		return ret;

	ret = __verify_firmware(init, fw);
	if (ret)
		return -EFAULT;

	__disable_dcpu(priv->regs);

	is_big_endian = init->is_big_endian;
	dmem_size = init->dmem_len;
	imem_size = init->imem_len;

	/* At the beginning of the firmware blob is a header. */
	header = (struct dpfe_firmware_header *)fw->data;
	/* Void pointer to the beginning of the actual firmware. */
	fw_blob = fw->data + sizeof(*header);
	/* IMEM comes right after the header. */
	imem = fw_blob;
	/* DMEM follows after IMEM. */
	dmem = fw_blob + imem_size;

	ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian);
	if (ret)
		return ret;
	ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian);
	if (ret)
		return ret;

	ret = __verify_fw_checksum(init, priv, header, init->chksum);
	if (ret)
		return ret;

	__enable_dcpu(priv->regs);

	return 0;
}

static ssize_t generic_show(unsigned int command, u32 response[],
			    struct device *dev, char *buf)
{
	struct private_data *priv;
	int ret;

	priv = dev_get_drvdata(dev);
	if (!priv)
		return sprintf(buf, "ERROR: driver private data not set\n");

	ret = __send_command(priv, command, response);
	if (ret < 0)
		return sprintf(buf, "ERROR: %s\n", error_text[-ret]);

	return 0;
}

static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	u32 response[MSG_FIELD_MAX];
	unsigned int info;
	ssize_t ret;

	ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf);
	if (ret)
		return ret;

	info = response[MSG_ARG0];

	return sprintf(buf, "%u.%u.%u.%u\n",
		       (info >> 24) & 0xff,
		       (info >> 16) & 0xff,
		       (info >> 8) & 0xff,
		       info & 0xff);
}

static ssize_t show_refresh(struct device *dev,
			    struct device_attribute *devattr, char *buf)
{
	u32 response[MSG_FIELD_MAX];
	void __iomem *info;
	struct private_data *priv;
	u8 refresh, sr_abort, ppre, thermal_offs, tuf;
	u32 mr4;
	ssize_t ret;

	ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf);
	if (ret)
		return ret;

	priv = dev_get_drvdata(dev);

	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
	if (!info)
		return ret;

	mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK;

	refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK;
	sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK;
	ppre = (mr4 >> DRAM_MR4_PPRE) & DRAM_MR4_PPRE_MASK;
	thermal_offs = (mr4 >> DRAM_MR4_TH_OFFS) & DRAM_MR4_TH_OFFS_MASK;
	tuf = (mr4 >> DRAM_MR4_TUF) & DRAM_MR4_TUF_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x %#x %#x\n",
		       readl_relaxed(info + DRAM_INFO_INTERVAL),
		       refresh, sr_abort, ppre, thermal_offs, tuf,
		       readl_relaxed(info + DRAM_INFO_ERROR));
}
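
/*
 * Writing to the dpfe_refresh attribute (handled below) fetches the DRAM
 * info block from the DCPU and stores the user-supplied value in its
 * DRAM_INFO_INTERVAL word; the interpretation of that interval is up to the
 * DCPU firmware.
 */
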
static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	u32 response[MSG_FIELD_MAX];
	struct private_data *priv;
	void __iomem *info;
	unsigned long val;
	int ret;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	priv = dev_get_drvdata(dev);

	ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response);
	if (ret)
		return ret;

	info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL);
	if (!info)
		return -EIO;

	writel_relaxed(val, info + DRAM_INFO_INTERVAL);

	return count;
}

static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
			   char *buf)
{
	u32 response[MSG_FIELD_MAX];
	struct private_data *priv;
	void __iomem *info;
	ssize_t ret;

	ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf);
	if (ret)
		return ret;

	priv = dev_get_drvdata(dev);

	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
	if (!info)
		return ret;

	return sprintf(buf, "%#x %#x %#x %#x %#x\n",
		       readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK,
		       readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK,
		       readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK,
		       readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK,
		       readl_relaxed(info + DRAM_VENDOR_ERROR) &
		       DRAM_VENDOR_MASK);
}

static int brcmstb_dpfe_resume(struct platform_device *pdev)
{
	struct init_data init;

	return brcmstb_dpfe_download_firmware(pdev, &init);
}

static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);

static struct attribute *dpfe_attrs[] = {
	&dev_attr_dpfe_info.attr,
	&dev_attr_dpfe_refresh.attr,
	&dev_attr_dpfe_vendor.attr,
	NULL
};
ATTRIBUTE_GROUPS(dpfe);
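
/*
 * The three attributes end up under the platform device's sysfs directory.
 * A typical interaction from user space might look like the following (the
 * path is only illustrative and depends on the device-tree node name):
 *
 *   cat /sys/devices/platform/<dpfe-node>/dpfe_info      # firmware version
 *   cat /sys/devices/platform/<dpfe-node>/dpfe_refresh   # interval/MR4 data
 *   echo 0x3 > /sys/devices/platform/<dpfe-node>/dpfe_refresh
 */
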
static int brcmstb_dpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct private_data *priv;
	struct init_data init;
	struct resource *res;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Remember the device; get_msg_ptr() uses it for dev_emerg(). */
	priv->dev = dev;

	mutex_init(&priv->lock);
	platform_set_drvdata(pdev, priv);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-cpu");
	priv->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->regs)) {
		dev_err(dev, "couldn't map DCPU registers\n");
		return -ENODEV;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-dmem");
	priv->dmem = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->dmem)) {
		dev_err(dev, "Couldn't map DCPU data memory\n");
		return -ENOENT;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-imem");
	priv->imem = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->imem)) {
		dev_err(dev, "Couldn't map DCPU instruction memory\n");
		return -ENOENT;
	}

	ret = brcmstb_dpfe_download_firmware(pdev, &init);
	if (ret)
		return ret;

	ret = sysfs_create_groups(&pdev->dev.kobj, dpfe_groups);
	if (!ret)
		dev_info(dev, "registered.\n");

	return ret;
}

static int brcmstb_dpfe_remove(struct platform_device *pdev)
{
	sysfs_remove_groups(&pdev->dev.kobj, dpfe_groups);

	return 0;
}

static const struct of_device_id brcmstb_dpfe_of_match[] = {
	{ .compatible = "brcm,dpfe-cpu", },
	{}
};
MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match);

static struct platform_driver brcmstb_dpfe_driver = {
	.driver	= {
		.name = DRVNAME,
		.of_match_table = brcmstb_dpfe_of_match,
	},
	.probe = brcmstb_dpfe_probe,
	.remove	= brcmstb_dpfe_remove,
	.resume = brcmstb_dpfe_resume,
};

module_platform_driver(brcmstb_dpfe_driver);

MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
MODULE_DESCRIPTION("BRCMSTB DDR PHY Front End Driver");
MODULE_LICENSE("GPL");