atmel-tdes.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Cryptographic API.
  4. *
  5. * Support for ATMEL DES/TDES HW acceleration.
  6. *
  7. * Copyright (c) 2012 Eukréa Electromatique - ATMEL
  8. * Author: Nicolas Royer <nicolas@eukrea.com>
  9. *
  10. * Some ideas are from omap-aes.c drivers.
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/module.h>
  14. #include <linux/slab.h>
  15. #include <linux/err.h>
  16. #include <linux/clk.h>
  17. #include <linux/io.h>
  18. #include <linux/hw_random.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/device.h>
  21. #include <linux/dmaengine.h>
  22. #include <linux/init.h>
  23. #include <linux/errno.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/irq.h>
  26. #include <linux/scatterlist.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/mod_devicetable.h>
  29. #include <linux/delay.h>
  30. #include <linux/crypto.h>
  31. #include <crypto/scatterwalk.h>
  32. #include <crypto/algapi.h>
  33. #include <crypto/internal/des.h>
  34. #include <crypto/internal/skcipher.h>
  35. #include "atmel-tdes-regs.h"
#define ATMEL_TDES_PRIORITY	300	/* crypto alg priority (above sw implementations) */

/* TDES flags */
/* Reserve bits [17:16], [13:12], [2:0] for the TDES Mode Register */
#define TDES_FLAGS_ENCRYPT	TDES_MR_CYPHER_ENC
#define TDES_FLAGS_OPMODE_MASK	(TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB		TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC		TDES_MR_OPMOD_CBC

#define TDES_FLAGS_MODE_MASK	(TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)

/* Driver state flags (kept outside the reserved MR bits above) */
#define TDES_FLAGS_INIT		BIT(3)	/* hw soft-reset already done */
#define TDES_FLAGS_FAST		BIT(4)	/* sg lists mapped directly (no bounce buffer) */
#define TDES_FLAGS_BUSY		BIT(5)	/* a request is being processed */
#define TDES_FLAGS_DMA		BIT(6)	/* dmaengine transfer in flight */

#define ATMEL_TDES_QUEUE_LENGTH	50	/* max backlogged requests */
/* Per-SoC capabilities, derived from the IP hardware version. */
struct atmel_tdes_caps {
	bool	has_dma;	/* true: dmaengine channels; false: PDC transfers */
};

struct atmel_tdes_dev;
/* Per-transform (tfm) context: key material and owning device. */
struct atmel_tdes_ctx {
	struct atmel_tdes_dev *dd;	/* device this tfm is bound to */

	int		keylen;		/* key length in bytes */
	u32		key[DES3_EDE_KEY_SIZE / sizeof(u32)];
	unsigned long	flags;

	u16		block_size;	/* cipher block size (set to DES_BLOCK_SIZE) */
};
/* Per-request context. */
struct atmel_tdes_reqctx {
	unsigned long	mode;			/* TDES_FLAGS_* opmode/direction bits */
	u8		lastc[DES_BLOCK_SIZE];	/* last ciphertext block, saved for IV chaining */
};
/* One dmaengine channel and its slave configuration. */
struct atmel_tdes_dma {
	struct dma_chan		*chan;
	struct dma_slave_config	dma_conf;
};
/*
 * Per-device state for one TDES IP instance.
 *
 * Only one request is processed at a time (TDES_FLAGS_BUSY); the crypto
 * queue serializes the rest.
 */
struct atmel_tdes_dev {
	struct list_head	list;		/* entry in atmel_tdes.dev_list */
	unsigned long		phys_base;	/* physical base, used for DMA slave addresses */
	void __iomem		*io_base;	/* mapped register window */

	struct atmel_tdes_ctx	*ctx;		/* ctx of the request in flight */
	struct device		*dev;
	struct clk		*iclk;		/* peripheral clock */
	int			irq;

	unsigned long		flags;		/* TDES_FLAGS_* */

	spinlock_t		lock;		/* protects queue and BUSY flag */
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;	/* transfer-complete bottom half */
	struct tasklet_struct	queue_task;	/* kicks the next queued request */

	struct skcipher_request	*req;		/* current request */
	size_t			total;		/* bytes still to be processed */

	struct scatterlist	*in_sg;		/* current source sg entry */
	unsigned int		nb_in_sg;
	size_t			in_offset;	/* offset into in_sg (slow path) */
	struct scatterlist	*out_sg;	/* current destination sg entry */
	unsigned int		nb_out_sg;
	size_t			out_offset;	/* offset into out_sg (slow path) */

	size_t			buflen;		/* usable bounce-buffer length */
	size_t			dma_size;	/* length of the transfer in flight */

	void			*buf_in;	/* bounce buffer towards the hw */
	int			dma_in;
	dma_addr_t		dma_addr_in;
	struct atmel_tdes_dma	dma_lch_in;

	void			*buf_out;	/* bounce buffer from the hw */
	int			dma_out;
	dma_addr_t		dma_addr_out;
	struct atmel_tdes_dma	dma_lch_out;

	struct atmel_tdes_caps	caps;		/* capabilities per hw_version */

	u32			hw_version;
};
/* Global driver state: list of probed TDES devices. */
struct atmel_tdes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_tdes_drv atmel_tdes = {
	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
};
  110. static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
  111. void *buf, size_t buflen, size_t total, int out)
  112. {
  113. size_t count, off = 0;
  114. while (buflen && total) {
  115. count = min((*sg)->length - *offset, total);
  116. count = min(count, buflen);
  117. if (!count)
  118. return off;
  119. scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
  120. off += count;
  121. buflen -= count;
  122. *offset += count;
  123. total -= count;
  124. if (*offset == (*sg)->length) {
  125. *sg = sg_next(*sg);
  126. if (*sg)
  127. *offset = 0;
  128. else
  129. total = 0;
  130. }
  131. }
  132. return off;
  133. }
/* Read a 32-bit TDES register. */
static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}
/* Write a 32-bit TDES register. */
static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}
  143. static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
  144. const u32 *value, int count)
  145. {
  146. for (; count--; value++, offset += 4)
  147. atmel_tdes_write(dd, offset, *value);
  148. }
/* Return the (single) probed TDES device, or NULL if none is registered. */
static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
{
	struct atmel_tdes_dev *tdes_dd;

	spin_lock_bh(&atmel_tdes.lock);
	/* One TDES IP per SoC. */
	tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
					   struct atmel_tdes_dev, list);
	spin_unlock_bh(&atmel_tdes.lock);
	return tdes_dd;
}
/*
 * Enable the peripheral clock and soft-reset the IP (reset only once;
 * TDES_FLAGS_INIT remembers it). Returns 0 or the clk enable error.
 */
static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & TDES_FLAGS_INIT)) {
		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
		dd->flags |= TDES_FLAGS_INIT;
	}

	return 0;
}
/* Read the 12-bit hardware version field of the IP. */
static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
{
	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
}
/*
 * Power the IP up just long enough to latch its hardware version,
 * then release the clock again. Returns 0 or a negative errno.
 */
static int atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
{
	int err;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_tdes_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);

	return 0;
}
/* dmaengine completion callback (RX channel): defer to the done tasklet. */
static void atmel_tdes_dma_callback(void *data)
{
	struct atmel_tdes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}
/*
 * Program mode, key and IV registers for the current request.
 * Hardware requirement: MR must be written before the IV registers.
 */
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
	int err;
	u32 valmr = TDES_MR_SMOD_PDC;

	err = atmel_tdes_hw_init(dd);
	if (err)
		return err;

	if (!dd->caps.has_dma)
		atmel_tdes_write(dd, TDES_PTCR,
			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
		/* more than two DES keys: 3-key triple DES */
		valmr |= TDES_MR_KEYMOD_3KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
		valmr |= TDES_MR_KEYMOD_2KEY;
		valmr |= TDES_MR_TDESMOD_TDES;
	} else {
		valmr |= TDES_MR_TDESMOD_DES;
	}

	valmr |= dd->flags & TDES_FLAGS_MODE_MASK;

	atmel_tdes_write(dd, TDES_MR, valmr);

	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
						dd->ctx->keylen >> 2);

	/* ECB takes no IV; the 64-bit IV is two 32-bit words */
	if (dd->req->iv && (valmr & TDES_MR_OPMOD_MASK) != TDES_MR_OPMOD_ECB)
		atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);

	return 0;
}
/*
 * Stop the PDC transfer and retrieve the result: unmap the fast-path
 * scatterlists, or copy the bounce buffer into the destination sg.
 */
static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
	int err = 0;
	size_t count;

	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);

	if (dd->flags & TDES_FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			dev_dbg(dd->dev, "not all data converted: %zu\n", count);
		}
	}

	return err;
}
  242. static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
  243. {
  244. int err = -ENOMEM;
  245. dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
  246. dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
  247. dd->buflen = PAGE_SIZE;
  248. dd->buflen &= ~(DES_BLOCK_SIZE - 1);
  249. if (!dd->buf_in || !dd->buf_out) {
  250. dev_dbg(dd->dev, "unable to alloc pages.\n");
  251. goto err_alloc;
  252. }
  253. /* MAP here */
  254. dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
  255. dd->buflen, DMA_TO_DEVICE);
  256. err = dma_mapping_error(dd->dev, dd->dma_addr_in);
  257. if (err) {
  258. dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
  259. goto err_map_in;
  260. }
  261. dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
  262. dd->buflen, DMA_FROM_DEVICE);
  263. err = dma_mapping_error(dd->dev, dd->dma_addr_out);
  264. if (err) {
  265. dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
  266. goto err_map_out;
  267. }
  268. return 0;
  269. err_map_out:
  270. dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
  271. DMA_TO_DEVICE);
  272. err_map_in:
  273. err_alloc:
  274. free_page((unsigned long)dd->buf_out);
  275. free_page((unsigned long)dd->buf_in);
  276. return err;
  277. }
/* Undo atmel_tdes_buff_init(): unmap and free both bounce buffers. */
static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
			 DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}
/*
 * Start a PDC (built-in peripheral DMA) transfer of @length bytes.
 * Completion is signalled via the ENDRX interrupt.
 */
static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	int len32;

	dd->dma_size = length;

	if (!(dd->flags & TDES_FLAGS_FAST)) {
		/* bounce buffer: make CPU writes visible to the device */
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	/* PDC counts in 32-bit words */
	len32 = DIV_ROUND_UP(length, sizeof(u32));
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
	atmel_tdes_write(dd, TDES_RCR, len32);

	/* Enable Interrupt */
	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);

	/* Start DMA transfer */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);

	return 0;
}
/*
 * Start a dmaengine transfer of @length bytes (mem->device on the TX
 * channel, device->mem on the RX channel). Completion of the RX side
 * triggers atmel_tdes_dma_callback(). Returns 0 or -EINVAL if a
 * descriptor could not be prepared.
 */
static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
				dma_addr_t dma_addr_in,
				dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;
	enum dma_slave_buswidth addr_width;

	dd->dma_size = length;

	if (!(dd->flags & TDES_FLAGS_FAST)) {
		/* bounce buffer: make CPU writes visible to the device */
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);
	}

	addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
	dd->dma_lch_out.dma_conf.src_addr_width = addr_width;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= TDES_FLAGS_DMA;

	/* single-entry sg lists wrapping the already-mapped DMA addresses */
	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	/* only the RX completion marks the whole block as done */
	out_desc->callback = atmel_tdes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}
  351. static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
  352. {
  353. int err, fast = 0, in, out;
  354. size_t count;
  355. dma_addr_t addr_in, addr_out;
  356. if ((!dd->in_offset) && (!dd->out_offset)) {
  357. /* check for alignment */
  358. in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
  359. IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
  360. out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
  361. IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
  362. fast = in && out;
  363. if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
  364. fast = 0;
  365. }
  366. if (fast) {
  367. count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
  368. count = min_t(size_t, count, sg_dma_len(dd->out_sg));
  369. err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
  370. if (!err) {
  371. dev_dbg(dd->dev, "dma_map_sg() error\n");
  372. return -EINVAL;
  373. }
  374. err = dma_map_sg(dd->dev, dd->out_sg, 1,
  375. DMA_FROM_DEVICE);
  376. if (!err) {
  377. dev_dbg(dd->dev, "dma_map_sg() error\n");
  378. dma_unmap_sg(dd->dev, dd->in_sg, 1,
  379. DMA_TO_DEVICE);
  380. return -EINVAL;
  381. }
  382. addr_in = sg_dma_address(dd->in_sg);
  383. addr_out = sg_dma_address(dd->out_sg);
  384. dd->flags |= TDES_FLAGS_FAST;
  385. } else {
  386. /* use cache buffers */
  387. count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
  388. dd->buf_in, dd->buflen, dd->total, 0);
  389. addr_in = dd->dma_addr_in;
  390. addr_out = dd->dma_addr_out;
  391. dd->flags &= ~TDES_FLAGS_FAST;
  392. }
  393. dd->total -= count;
  394. if (dd->caps.has_dma)
  395. err = atmel_tdes_crypt_dma(dd, addr_in, addr_out, count);
  396. else
  397. err = atmel_tdes_crypt_pdc(dd, addr_in, addr_out, count);
  398. if (err && (dd->flags & TDES_FLAGS_FAST)) {
  399. dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
  400. dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
  401. }
  402. return err;
  403. }
/*
 * Propagate the chaining value back into req->iv, as the skcipher API
 * requires: the last ciphertext block, taken from the destination when
 * encrypting, or from the copy saved before decrypting (the source may
 * have been overwritten in-place).
 */
static void
atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & TDES_FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	else
		memcpy(req->iv, rctx->lastc, ivsize);
}
/* Complete the current request: release clock, clear BUSY, propagate IV. */
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);

	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~TDES_FLAGS_BUSY;

	/* ECB does no chaining, so it never updates the IV */
	if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
		atmel_tdes_set_iv_as_last_ciphertext_block(dd);

	skcipher_request_complete(req, err);
}
/*
 * Enqueue @req (may be NULL to just kick the queue) and, if the device
 * is idle, dequeue the next request and start processing it.
 * Returns the crypto_enqueue_request() status for @req.
 */
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
				   struct skcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_tdes_ctx *ctx;
	struct atmel_tdes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = crypto_enqueue_request(&dd->queue, &req->base);
	if (dd->flags & TDES_FLAGS_BUSY) {
		/* in-flight request will re-kick the queue when it finishes */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= TDES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	req = skcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));

	rctx->mode &= TDES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;

	err = atmel_tdes_write_ctrl(dd);
	if (!err)
		err = atmel_tdes_crypt_start(dd);
	if (err) {
		/* des_task will not finish it, so do it here */
		atmel_tdes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}
/*
 * Tear down a dmaengine transfer: unmap the fast-path scatterlists or
 * copy the result out of the bounce buffer. Returns -EINVAL if no DMA
 * was in flight or not all data could be copied out.
 */
static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & TDES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & TDES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
						   dd->dma_size,
						   DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
					dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				dev_dbg(dd->dev, "not all data converted: %zu\n", count);
			}
		}
	}

	return err;
}
/*
 * Common entry point for all encrypt/decrypt operations.
 * @mode carries the TDES_FLAGS_* opmode/direction bits for the request.
 */
static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
	struct device *dev = ctx->dd->dev;

	if (!req->cryptlen)
		return 0;

	if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
		dev_dbg(dev, "request size is not exact amount of DES blocks\n");
		return -EINVAL;
	}

	ctx->block_size = DES_BLOCK_SIZE;

	rctx->mode = mode;

	if ((mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB &&
	    !(mode & TDES_FLAGS_ENCRYPT)) {
		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

		/*
		 * Save the last ciphertext block now: in-place decryption
		 * overwrites the source, and this block becomes the next IV.
		 */
		if (req->cryptlen >= ivsize)
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}

	return atmel_tdes_handle_queue(ctx->dd, req);
}
  523. static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
  524. {
  525. int ret;
  526. /* Try to grab 2 DMA channels */
  527. dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
  528. if (IS_ERR(dd->dma_lch_in.chan)) {
  529. ret = PTR_ERR(dd->dma_lch_in.chan);
  530. goto err_dma_in;
  531. }
  532. dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
  533. TDES_IDATA1R;
  534. dd->dma_lch_in.dma_conf.src_maxburst = 1;
  535. dd->dma_lch_in.dma_conf.src_addr_width =
  536. DMA_SLAVE_BUSWIDTH_4_BYTES;
  537. dd->dma_lch_in.dma_conf.dst_maxburst = 1;
  538. dd->dma_lch_in.dma_conf.dst_addr_width =
  539. DMA_SLAVE_BUSWIDTH_4_BYTES;
  540. dd->dma_lch_in.dma_conf.device_fc = false;
  541. dd->dma_lch_out.chan = dma_request_chan(dd->dev, "rx");
  542. if (IS_ERR(dd->dma_lch_out.chan)) {
  543. ret = PTR_ERR(dd->dma_lch_out.chan);
  544. goto err_dma_out;
  545. }
  546. dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
  547. TDES_ODATA1R;
  548. dd->dma_lch_out.dma_conf.src_maxburst = 1;
  549. dd->dma_lch_out.dma_conf.src_addr_width =
  550. DMA_SLAVE_BUSWIDTH_4_BYTES;
  551. dd->dma_lch_out.dma_conf.dst_maxburst = 1;
  552. dd->dma_lch_out.dma_conf.dst_addr_width =
  553. DMA_SLAVE_BUSWIDTH_4_BYTES;
  554. dd->dma_lch_out.dma_conf.device_fc = false;
  555. return 0;
  556. err_dma_out:
  557. dma_release_channel(dd->dma_lch_in.chan);
  558. err_dma_in:
  559. dev_err(dd->dev, "no DMA channel available\n");
  560. return ret;
  561. }
/* Release both dmaengine channels acquired by atmel_tdes_dma_init(). */
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}
  567. static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
  568. unsigned int keylen)
  569. {
  570. struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
  571. int err;
  572. err = verify_skcipher_des_key(tfm, key);
  573. if (err)
  574. return err;
  575. memcpy(ctx->key, key, keylen);
  576. ctx->keylen = keylen;
  577. return 0;
  578. }
  579. static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
  580. unsigned int keylen)
  581. {
  582. struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
  583. int err;
  584. err = verify_skcipher_des3_key(tfm, key);
  585. if (err)
  586. return err;
  587. memcpy(ctx->key, key, keylen);
  588. ctx->keylen = keylen;
  589. return 0;
  590. }
/* skcipher entry points: select the opmode/direction flags and queue. */
static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_ECB);
}

static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC | TDES_FLAGS_ENCRYPT);
}

static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
/* Bind the transform to the (single) TDES device and size its reqctx. */
static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->dd = atmel_tdes_dev_alloc();
	if (!ctx->dd)
		return -ENODEV;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));

	return 0;
}
/* Fill in the fields common to every algorithm in tdes_algs[]. */
static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
	alg->base.cra_priority = ATMEL_TDES_PRIORITY;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
	alg->base.cra_module = THIS_MODULE;

	alg->init = atmel_tdes_init_tfm;
}
/*
 * DES and triple-DES algorithm definitions (ECB and CBC modes).
 * Common fields are filled in by atmel_tdes_skcipher_alg_init().
 */
static struct skcipher_alg tdes_algs[] = {
{
	.base.cra_name		= "ecb(des)",
	.base.cra_driver_name	= "atmel-ecb-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des)",
	.base.cra_driver_name	= "atmel-cbc-des",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES_KEY_SIZE,
	.max_keysize		= DES_KEY_SIZE,
	.ivsize			= DES_BLOCK_SIZE,
	.setkey			= atmel_des_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
},
{
	.base.cra_name		= "ecb(des3_ede)",
	.base.cra_driver_name	= "atmel-ecb-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_ecb_encrypt,
	.decrypt		= atmel_tdes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(des3_ede)",
	.base.cra_driver_name	= "atmel-cbc-tdes",
	.base.cra_blocksize	= DES_BLOCK_SIZE,
	.base.cra_alignmask	= 0x7,

	.min_keysize		= DES3_EDE_KEY_SIZE,
	.max_keysize		= DES3_EDE_KEY_SIZE,
	.setkey			= atmel_tdes_setkey,
	.encrypt		= atmel_tdes_cbc_encrypt,
	.decrypt		= atmel_tdes_cbc_decrypt,
	.ivsize			= DES_BLOCK_SIZE,
},
};
/* Tasklet: try to dequeue and start the next pending request. */
static void atmel_tdes_queue_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;

	atmel_tdes_handle_queue(dd, NULL);
}
/*
 * Bottom half run on transfer completion: retrieve the finished chunk,
 * then either start the next chunk of the same request or complete it
 * and kick the queue.
 */
static void atmel_tdes_done_task(unsigned long data)
{
	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
	int err;

	if (!(dd->flags & TDES_FLAGS_DMA))
		err = atmel_tdes_crypt_pdc_stop(dd);
	else
		err = atmel_tdes_crypt_dma_stop(dd);

	if (dd->total && !err) {
		if (dd->flags & TDES_FLAGS_FAST) {
			/* fast path handles exactly one sg entry per transfer */
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_tdes_crypt_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	atmel_tdes_finish_req(dd, err);
	atmel_tdes_handle_queue(dd, NULL);
}
/*
 * IRQ handler: acknowledge any enabled+pending status bits and defer
 * the real work to the done tasklet.
 */
static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
{
	struct atmel_tdes_dev *tdes_dd = dev_id;
	u32 reg;

	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
		/* disable the interrupts we just consumed */
		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
		if (TDES_FLAGS_BUSY & tdes_dd->flags)
			tasklet_schedule(&tdes_dd->done_task);
		else
			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
  715. static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
  716. {
  717. int i;
  718. for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
  719. crypto_unregister_skcipher(&tdes_algs[i]);
  720. }
  721. static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
  722. {
  723. int err, i, j;
  724. for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
  725. atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
  726. err = crypto_register_skcipher(&tdes_algs[i]);
  727. if (err)
  728. goto err_tdes_algs;
  729. }
  730. return 0;
  731. err_tdes_algs:
  732. for (j = 0; j < i; j++)
  733. crypto_unregister_skcipher(&tdes_algs[j]);
  734. return err;
  735. }
/* Derive per-device capabilities from the IP's major version number. */
static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{
	/* default: minimum capabilities (PDC only) */
	dd->caps.has_dma = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xf00) {
	case 0x800:
	case 0x700:
		dd->caps.has_dma = 1;
		break;
	case 0x600:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged tdes version, set minimum capabilities\n");
		break;
	}
}
/* Device-tree match table. */
static const struct of_device_id atmel_tdes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-tdes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
/*
 * Probe: map registers, request IRQ and clock, read the hw version,
 * set up bounce buffers (and dmaengine channels when available), then
 * publish the device and register the algorithms.
 */
static int atmel_tdes_probe(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;
	struct device *dev = &pdev->dev;
	struct resource *tdes_res;
	int err;

	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
	if (!tdes_dd)
		return -ENOMEM;

	tdes_dd->dev = dev;

	platform_set_drvdata(pdev, tdes_dd);

	INIT_LIST_HEAD(&tdes_dd->list);
	spin_lock_init(&tdes_dd->lock);

	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
					(unsigned long)tdes_dd);
	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
					(unsigned long)tdes_dd);

	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);

	tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &tdes_res);
	if (IS_ERR(tdes_dd->io_base)) {
		err = PTR_ERR(tdes_dd->io_base);
		goto err_tasklet_kill;
	}
	/* physical base is needed for the DMA slave addresses */
	tdes_dd->phys_base = tdes_res->start;

	/* Get the IRQ */
	tdes_dd->irq = platform_get_irq(pdev, 0);
	if (tdes_dd->irq < 0) {
		err = tdes_dd->irq;
		goto err_tasklet_kill;
	}

	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
			       IRQF_SHARED, "atmel-tdes", tdes_dd);
	if (err) {
		dev_err(dev, "unable to request tdes irq.\n");
		goto err_tasklet_kill;
	}

	/* Initializing the clock */
	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
	if (IS_ERR(tdes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(tdes_dd->iclk);
		goto err_tasklet_kill;
	}

	err = atmel_tdes_hw_version_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	atmel_tdes_get_cap(tdes_dd);

	err = atmel_tdes_buff_init(tdes_dd);
	if (err)
		goto err_tasklet_kill;

	if (tdes_dd->caps.has_dma) {
		err = atmel_tdes_dma_init(tdes_dd);
		if (err)
			goto err_buff_cleanup;

		dev_info(dev, "using %s, %s for DMA transfers\n",
				dma_chan_name(tdes_dd->dma_lch_in.chan),
				dma_chan_name(tdes_dd->dma_lch_out.chan));
	}

	/* publish the device before registering algorithms that use it */
	spin_lock(&atmel_tdes.lock);
	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
	spin_unlock(&atmel_tdes.lock);

	err = atmel_tdes_register_algs(tdes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel DES/TDES\n");

	return 0;

err_algs:
	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);
	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);
err_buff_cleanup:
	atmel_tdes_buff_cleanup(tdes_dd);
err_tasklet_kill:
	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	return err;
}
/* Remove: unpublish the device and release everything probe acquired. */
static void atmel_tdes_remove(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev);

	spin_lock(&atmel_tdes.lock);
	list_del(&tdes_dd->list);
	spin_unlock(&atmel_tdes.lock);

	atmel_tdes_unregister_algs(tdes_dd);

	tasklet_kill(&tdes_dd->done_task);
	tasklet_kill(&tdes_dd->queue_task);

	if (tdes_dd->caps.has_dma)
		atmel_tdes_dma_cleanup(tdes_dd);

	atmel_tdes_buff_cleanup(tdes_dd);
}
/* Platform driver glue. */
static struct platform_driver atmel_tdes_driver = {
	.probe		= atmel_tdes_probe,
	.remove_new	= atmel_tdes_remove,
	.driver		= {
		.name	= "atmel_tdes",
		.of_match_table = atmel_tdes_dt_ids,
	},
};

module_platform_driver(atmel_tdes_driver);

MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");