
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
/* The page argument of these two macros is an Emu page (4096 bytes), not a
 * kernel-size aligned page as elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
        (((__le32 *)(emu)->ptb_pages.area)[page] = \
         cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
        (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))

#define UNIT_PAGES              (PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0        (MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1        (MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)        ((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)       ((page) << PAGE_SHIFT)
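
/*
 * For example, on an architecture with a 16 KB kernel PAGE_SIZE, UNIT_PAGES
 * is 4: each aligned page covers four consecutive 4 KB Emu pages (and thus
 * four PTB entries), and MAX_ALIGN_PAGES0/1 shrink to a quarter of
 * MAXPAGES0/1.
 */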
#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)    __set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)        __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_ptb_entry(emu, page, addr);
                dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
                        (unsigned int)__get_ptb_entry(emu, page));
                addr += EMUPAGESIZE;
        }
}

static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                /* do not increment ptr */
                __set_ptb_entry(emu, page, emu->silent_page.addr);
                dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
                        page, (unsigned int)__get_ptb_entry(emu, page));
        }
}
#endif /* PAGE_SIZE */
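
/*
 * In other words: when the kernel page size matches the 4 KB Emu page size,
 * set_ptb_entry() is the bare single-entry macro; otherwise each aligned page
 * expands into UNIT_PAGES consecutive PTB entries, with the DMA address
 * advanced by EMUPAGESIZE per entry (the silent page is simply repeated).
 */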

static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)    list_entry(l, struct snd_emu10k1_memblk, member)

/* initialize the emu10k1-specific part of the memblk */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
        blk->mapped_page = -1;
        INIT_LIST_HEAD(&blk->mapped_link);
        INIT_LIST_HEAD(&blk->mapped_order_link);
        blk->map_locked = 0;
        blk->first_page = get_aligned_page(blk->mem.offset);
        blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
        blk->pages = blk->last_page - blk->first_page + 1;
}
/*
 * search for an empty region in the PTB with the given size
 *
 * if an empty region is found, return its first page and store the next
 * mapped block in nextp
 * if not found, return a negative error code.
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
        int page = 1, found_page = -ENOMEM;
        int max_size = npages;
        int size;
        struct list_head *candidate = &emu->mapped_link_head;
        struct list_head *pos;

        list_for_each (pos, &emu->mapped_link_head) {
                struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
                if (blk->mapped_page < 0)
                        continue;
                size = blk->mapped_page - page;
                if (size == npages) {
                        *nextp = pos;
                        return page;
                }
                else if (size > max_size) {
                        /* we look for the maximum empty hole */
                        max_size = size;
                        candidate = pos;
                        found_page = page;
                }
                page = blk->mapped_page + blk->pages;
        }
        size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
        if (size >= max_size) {
                *nextp = pos;
                return page;
        }
        *nextp = candidate;
        return found_page;
}
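
/*
 * Note that the search above is effectively a worst-fit policy with an
 * exact-fit fast path: a hole of exactly npages is taken immediately,
 * otherwise the largest hole seen (if any is big enough) wins.  Page 0 is
 * never returned; it is reserved (see the check in map_memblk() below).
 */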
/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, pg;
        struct list_head *next;

        page = search_empty_map_area(emu, blk->pages, &next);
        if (page < 0) /* not found */
                return page;
        if (page == 0) {
                dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
                return -EINVAL;
        }
        /* insert this block at the proper position in the mapped list */
        list_add_tail(&blk->mapped_link, next);
        /* append this block as the newest one in the order list */
        list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
        blk->mapped_page = page;
        /* fill PTB */
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_ptb_entry(emu, page, emu->page_addr_table[pg]);
                page++;
        }
        return 0;
}
/*
 * unmap the block
 * return the size of the resulting empty region (in pages)
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int start_page, end_page, mpage, pg;
        struct list_head *p;
        struct snd_emu10k1_memblk *q;

        /* calculate the expected size of the empty region */
        if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                start_page = q->mapped_page + q->pages;
        } else
                start_page = 1;
        if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
                q = get_emu10k1_memblk(p, mapped_link);
                end_page = q->mapped_page;
        } else
                end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);

        /* remove links */
        list_del(&blk->mapped_link);
        list_del(&blk->mapped_order_link);
        /* clear PTB */
        mpage = blk->mapped_page;
        for (pg = blk->first_page; pg <= blk->last_page; pg++) {
                set_silent_ptb(emu, mpage);
                mpage++;
        }
        blk->mapped_page = -1;
        return end_page - start_page; /* return the new empty size */
}
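
/*
 * The size returned above covers not just the pages freed here but the whole
 * gap between the neighbouring mapped blocks; snd_emu10k1_memblk_map() relies
 * on this when it evicts blocks one by one until the gap is large enough.
 */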
/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to a page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *blk;
        int page, psize;

        psize = get_aligned_page(size + PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &emu->memhdr->block) {
                blk = get_emu10k1_memblk(p, mem.list);
                if (page + psize <= blk->first_page)
                        goto __found_pages;
                page = blk->last_page + 1;
        }
        if (page + psize > emu->max_cache_pages)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = (struct snd_emu10k1_memblk *)
                __snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
        if (blk == NULL)
                return NULL;
        blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
        emu10k1_memblk_init(blk);
        return blk;
}
/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
        if (addr & ~emu->dma_mask) {
                dev_err_ratelimited(emu->card->dev,
                        "max memory size is 0x%lx (addr = 0x%lx)!!\n",
                        emu->dma_mask, (unsigned long)addr);
                return 0;
        }
        if (addr & (EMUPAGESIZE-1)) {
                dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
                return 0;
        }
        return 1;
}
/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, only update the link order.
 * if no empty pages are found, release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int err;
        int size;
        struct list_head *p, *nextp;
        struct snd_emu10k1_memblk *deleted;
        unsigned long flags;

        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0) {
                /* update order link */
                list_move_tail(&blk->mapped_order_link,
                               &emu->mapped_order_link_head);
                spin_unlock_irqrestore(&emu->memblk_lock, flags);
                return 0;
        }
        if ((err = map_memblk(emu, blk)) < 0) {
                /* not enough pages - try to unmap some blocks,
                 * starting from the oldest block
                 */
                p = emu->mapped_order_link_head.next;
                for (; p != &emu->mapped_order_link_head; p = nextp) {
                        nextp = p->next;
                        deleted = get_emu10k1_memblk(p, mapped_order_link);
                        if (deleted->map_locked)
                                continue;
                        size = unmap_memblk(emu, deleted);
                        if (size >= blk->pages) {
                                /* ok, the empty region is large enough */
                                err = map_memblk(emu, blk);
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
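
/*
 * Because mapped_order_link_head is kept in most-recently-mapped order (a
 * remap merely moves the block to the tail above), the eviction loop walks
 * it from the head, i.e. blocks are reclaimed in LRU fashion; blocks with
 * map_locked set (e.g. active PCM buffers) are never evicted.
 */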
/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_util_memhdr *hdr;
        struct snd_emu10k1_memblk *blk;
        int page, err, idx;

        if (snd_BUG_ON(!emu))
                return NULL;
        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
                       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
                return NULL;
        hdr = emu->memhdr;
        if (snd_BUG_ON(!hdr))
                return NULL;

        idx = runtime->period_size >= runtime->buffer_size ?
                                        (emu->delay_pcm_irq * 2) : 0;
        mutex_lock(&hdr->block_mutex);
        blk = search_empty(emu, runtime->dma_bytes + idx);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        /* fill buffer addresses, but pointers are not stored so that
         * snd_free_pci_page() is not called in synth_free()
         */
        idx = 0;
        for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
                unsigned long ofs = idx << PAGE_SHIFT;
                dma_addr_t addr;
                if (ofs >= runtime->dma_bytes)
                        addr = emu->silent_page.addr;
                else
                        addr = snd_pcm_sgbuf_get_addr(substream, ofs);
                if (! is_valid_page(emu, addr)) {
                        dev_err_ratelimited(emu->card->dev,
                                "emu: failure page = %d\n", idx);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                emu->page_addr_table[page] = addr;
                emu->page_ptr_table[page] = NULL;
        }

        /* set PTB entries */
        blk->map_locked = 1; /* do not unmap this block! */
        err = snd_emu10k1_memblk_map(emu, blk);
        if (err < 0) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}
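
/*
 * Pages beyond runtime->dma_bytes (including the extra space reserved via
 * delay_pcm_irq when a single period covers the whole buffer) are
 * deliberately pointed at the silent page, so the hardware can run past the
 * buffer end without touching unrelated memory.
 */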
/*
 * release DMA buffer from the page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
        if (snd_BUG_ON(!emu || !blk))
                return -EINVAL;
        return snd_emu10k1_synth_free(emu, blk);
}
/*
 * allocate DMA pages, widening the allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c for why
 * this might be needed.
 *
 * If you modify this function, check whether __synth_free_pages() also needs
 * changes.
 */
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
                                        struct snd_dma_buffer *dmab)
{
        if (emu->iommu_workaround) {
                size_t npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                size_t size_real = npages * PAGE_SIZE;

                /*
                 * The device has been observed to access up to 256 extra
                 * bytes, but use 1k to be safe.
                 */
                if (size_real < size + 1024)
                        size += PAGE_SIZE;
        }

        return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
                                   snd_dma_pci_data(emu->pci), size, dmab);
}
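
/*
 * For example, with 4 KB kernel pages a request for exactly 4096 bytes
 * rounds to size_real == 4096, which is less than 4096 + 1024, so a second
 * page is added and 8192 bytes are allocated; a 2048-byte request already
 * has more than 1k of slack within its page and stays at one page.
 */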
/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
        struct snd_emu10k1_memblk *blk;
        struct snd_util_memhdr *hdr = hw->memhdr;

        mutex_lock(&hdr->block_mutex);
        blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        if (synth_alloc_pages(hw, blk)) {
                __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }
        snd_emu10k1_memblk_map(hw, blk);
        mutex_unlock(&hdr->block_mutex);
        return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
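
/*
 * A typical caller (hypothetical sketch, not taken from a real driver)
 * would pair the calls like this:
 *
 *      struct snd_util_memblk *blk;
 *
 *      blk = snd_emu10k1_synth_alloc(emu, sample_bytes);
 *      if (!blk)
 *              return -ENOMEM;
 *      if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf, sample_bytes))
 *              err = -EFAULT;
 *      ...
 *      snd_emu10k1_synth_free(emu, blk);
 */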
/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
        struct snd_util_memhdr *hdr = emu->memhdr;
        struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
        unsigned long flags;

        mutex_lock(&hdr->block_mutex);
        spin_lock_irqsave(&emu->memblk_lock, flags);
        if (blk->mapped_page >= 0)
                unmap_memblk(emu, blk);
        spin_unlock_irqrestore(&emu->memblk_lock, flags);
        synth_free_pages(emu, blk);
        __snd_util_mem_free(hdr, memblk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
/* check the new allocation range: return the pages this block owns alone */
static void get_single_page_range(struct snd_util_memhdr *hdr,
                                  struct snd_emu10k1_memblk *blk,
                                  int *first_page_ret, int *last_page_ret)
{
        struct list_head *p;
        struct snd_emu10k1_memblk *q;
        int first_page, last_page;

        first_page = blk->first_page;
        if ((p = blk->mem.list.prev) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->last_page == first_page)
                        first_page++;  /* first page was already allocated */
        }
        last_page = blk->last_page;
        if ((p = blk->mem.list.next) != &hdr->block) {
                q = get_emu10k1_memblk(p, mem.list);
                if (q->first_page == last_page)
                        last_page--;  /* last page was already allocated */
        }
        *first_page_ret = first_page;
        *last_page_ret = last_page;
}
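
/*
 * Synth blocks are not page-aligned, so a kernel page at either end may be
 * shared with a neighbouring block in the list.  Excluding such shared
 * boundary pages here keeps synth_alloc_pages() from allocating a page
 * twice and __synth_free_pages() from freeing one that is still in use.
 */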
/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
                               int last_page)
{
        struct snd_dma_buffer dmab;
        int page;

        dmab.dev.type = SNDRV_DMA_TYPE_DEV;
        dmab.dev.dev = snd_dma_pci_data(emu->pci);

        for (page = first_page; page <= last_page; page++) {
                if (emu->page_ptr_table[page] == NULL)
                        continue;
                dmab.area = emu->page_ptr_table[page];
                dmab.addr = emu->page_addr_table[page];

                /*
                 * please keep me in sync with the logic in
                 * snd_emu10k1_alloc_pages_maybe_wider()
                 */
                dmab.bytes = PAGE_SIZE;
                if (emu->iommu_workaround)
                        dmab.bytes *= 2;

                snd_dma_free_pages(&dmab);
                emu->page_addr_table[page] = 0;
                emu->page_ptr_table[page] = NULL;
        }
}
/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int page, first_page, last_page;
        struct snd_dma_buffer dmab;

        emu10k1_memblk_init(blk);
        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        /* allocate kernel pages */
        for (page = first_page; page <= last_page; page++) {
                if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
                                                        &dmab) < 0)
                        goto __fail;
                if (!is_valid_page(emu, dmab.addr)) {
                        snd_dma_free_pages(&dmab);
                        goto __fail;
                }
                emu->page_addr_table[page] = dmab.addr;
                emu->page_ptr_table[page] = dmab.area;
        }
        return 0;

__fail:
        /* release the pages allocated so far */
        last_page = page - 1;
        __synth_free_pages(emu, first_page, last_page);

        return -ENOMEM;
}
/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
        int first_page, last_page;

        get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
        __synth_free_pages(emu, first_page, last_page);
        return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
        char *ptr;
        if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
                return NULL;
        ptr = emu->page_ptr_table[page];
        if (! ptr) {
                dev_err(emu->card->dev,
                        "access to NULL ptr: page = %d\n", page);
                return NULL;
        }
        ptr += offset & (PAGE_SIZE - 1);
        return (void *)ptr;
}
/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                            int offset, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr)
                        memset(ptr, 0, temp);
                offset = nextofs;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
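
/*
 * Both this helper and the copy below must walk the block one kernel page
 * at a time: the pages backing a synth block are not physically contiguous,
 * so each chunk is clamped to the end of the current page (temp) or to the
 * end of the requested range (temp1), whichever comes first.
 */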
/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
                                     int offset, const char __user *data, int size)
{
        int page, nextofs, end_offset, temp, temp1;
        void *ptr;
        struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

        offset += blk->offset & (PAGE_SIZE - 1);
        end_offset = offset + size;
        page = get_aligned_page(offset);
        do {
                nextofs = aligned_page_offset(page + 1);
                temp = nextofs - offset;
                temp1 = end_offset - offset;
                if (temp1 < temp)
                        temp = temp1;
                ptr = offset_ptr(emu, page + p->first_page, offset);
                if (ptr && copy_from_user(ptr, data, temp))
                        return -EFAULT;
                offset = nextofs;
                data += temp;
                page++;
        } while (offset < end_offset);
        return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);