msu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 *
 * WIN_READY:	window can be used by HW
 * WIN_INUSE:	window is in use
 * WIN_LOCKED:	window is filled up and is being processed by the buffer
 *		handling code
 *
 * All state transitions happen automatically, except for the LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,
	WIN_INUSE,
	WIN_LOCKED
};

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @lockout:	lockout state, see the comment above
 * @lo_lock:	lockout state serialization
 * @nr_blocks:	number of blocks (pages) in this window
 * @nr_segs:	number of segments in this window (<= @nr_blocks)
 * @msc:	pointer to the MSC device
 * @_sgt:	inline array of block descriptors
 * @sgt:	array of block descriptors in use; points to @_sgt unless
 *		the MSU buffer provides its own
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	enum lockout_state	lockout;
	spinlock_t		lo_lock;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:	msc::iter_list linkage
 * @msc:	pointer to the MSC device
 * @start_win:	oldest window
 * @win:	current window
 * @offset:	current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:	current block in the window
 * @block_off:	offset into current block
 * @wrap_count:	block wrapping handling
 * @eof:	end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	struct scatterlist	*start_block;
	struct scatterlist	*block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:	register window base address
 * @msu_base:	MSU register window base address
 * @thdev:	intel_th_device pointer
 * @mbuf:	MSU buffer, if assigned
 * @mbuf_priv:	MSU buffer's private data, if @mbuf
 * @work:	a work to stop the trace when the buffer is full
 * @win_list:	list of windows in multiblock mode
 * @single_sgt:	single mode buffer
 * @cur_win:	current window
 * @switch_on_unlock:	window to switch to when it becomes available
 * @nr_pages:	total number of pages allocated for this buffer
 * @single_sz:	amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:	buffer's base pointer
 * @base_addr:	buffer's base address
 * @orig_addr:	MSC0 original BAR value, restored on disable
 * @orig_sz:	MSC0 original size value, restored on disable
 * @user_count:	number of users of the buffer
 * @mmap_count:	number of mappings
 * @buf_mutex:	mutex to serialize access to buffer-related bits
 * @iter_list:	list of open file descriptor iterators
 * @stop_on_full:	stop the trace if the current window is full
 * @enabled:	MSC is enabled
 * @wrap:	wrapping is enabled
 * @do_irq:	IRQ resource is available, handle interrupts
 * @multi_is_broken:	multiblock mode broken on this generation of hardware
 * @mode:	MSC operating mode
 * @burst_len:	write burst length
 * @index:	number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	const struct msu_buffer	*mbuf;
	void			*mbuf_priv;

	struct work_struct	work;
	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	struct msc_window	*switch_on_unlock;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;
	u32			orig_addr;
	u32			orig_sz;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	bool			stop_on_full;

	/* config */
	unsigned int		enabled : 1,
				wrap : 1,
				do_irq : 1,
				multi_is_broken : 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry:	link to msu_buffer_list
 * @mbuf:	MSU buffer object
 * @owner:	module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head	entry;
	const struct msu_buffer	*mbuf;
	struct module		*owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
	struct msu_buffer_entry *mbe;

	lockdep_assert_held(&msu_buffer_mutex);

	list_for_each_entry(mbe, &msu_buffer_list, entry) {
		if (!strcmp(mbe->mbuf->name, name))
			return mbe;
	}

	return NULL;
}

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(name);
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);

	return mbe ? mbe->mbuf : NULL;
}

static void msu_buffer_put(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe)
		module_put(mbe->owner);
	mutex_unlock(&msu_buffer_mutex);
}

int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
				 struct module *owner)
{
	struct msu_buffer_entry *mbe;
	int ret = 0;

	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
	if (!mbe)
		return -ENOMEM;

	mutex_lock(&msu_buffer_mutex);
	if (__msu_buffer_entry_find(mbuf->name)) {
		ret = -EEXIST;
		kfree(mbe);
		goto unlock;
	}

	mbe->mbuf = mbuf;
	mbe->owner = owner;
	list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
	mutex_unlock(&msu_buffer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);

void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe) {
		list_del(&mbe->entry);
		kfree(mbe);
	}
	mutex_unlock(&msu_buffer_mutex);
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
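
/*
 * A minimal sketch of how an out-of-tree buffer sink would use the
 * registration API above. The callback names and the "example" sink name
 * are illustrative; struct msu_buffer and its fields are declared in
 * <linux/intel_th.h>.
 *
 *	static void *example_assign(struct device *dev, int *mode)
 *	{
 *		return kzalloc(sizeof(struct example_priv), GFP_KERNEL);
 *	}
 *
 *	static void example_unassign(void *priv)
 *	{
 *		kfree(priv);
 *	}
 *
 *	static const struct msu_buffer example_mbuf = {
 *		.name		= "example",
 *		.assign		= example_assign,
 *		.unassign	= example_unassign,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return intel_th_msu_buffer_register(&example_mbuf, THIS_MODULE);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		intel_th_msu_buffer_unregister(&example_mbuf);
 *	}
 *
 * Once registered, the sink is selected by writing its name to the MSC's
 * "mode" sysfs attribute (see mode_store() below).
 */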
static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
	return win->sgt->sgl;
}

static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
	return sg_virt(msc_win_base_sg(win));
}

static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
	return sg_dma_address(msc_win_base_sg(win));
}

static inline unsigned long
msc_win_base_pfn(struct msc_window *win)
{
	return PFN_DOWN(msc_win_base_dma(win));
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 *
 * Return: true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return: window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

static size_t msc_win_total_sz(struct msc_window *win)
{
	struct scatterlist *sg;
	unsigned int blk;
	size_t size = 0;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_wrapped(bdesc))
			return (size_t)win->nr_blocks << PAGE_SHIFT;

		size += msc_total_sz(bdesc);
		if (msc_block_last_written(bdesc))
			break;
	}

	return size;
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc:	MSC device
 * @sgt:	SG table of the window
 * @nonempty:	skip over empty windows
 *
 * Return: MSC window structure pointer or NULL if the window
 * could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
			continue;

		if (found)
			return win;
	}

	return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return: the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;

	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
	if (win)
		return win;

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_sg() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return: index of the block with the oldest data
 */
static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
	unsigned int blk;
	struct scatterlist *sg;
	struct msc_block_desc *bdesc = msc_win_base(win);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return msc_win_base_sg(win);

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_last_written(bdesc))
			return sg;
	}

	return msc_win_base_sg(win);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return sg_virt(iter->block);
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block)
		return;

	iter->start_block = msc_win_oldest_sg(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = NULL;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = NULL;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
	else
		iter->block = sg_next(iter->block);

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}
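
/*
 * A worked example of the advance logic above. Assume a window of three
 * blocks B0..B2 where the hardware wrapped and B1 is the last written
 * block: msc_win_oldest_sg() picks B1 as the start block and wrap_count
 * starts at 2. The visit order is then B1 (oldest tail of the wrapped
 * block), B2, B0, and B1 once more (its newest head), at which point
 * wrap_count drops to 0 and the iterator moves on to the next window.
 */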
/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:	iterator callback; follows the copy_to_user() convention and
 *	returns the number of bytes it could not consume
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return: amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;
	struct scatterlist *sg;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset_startat(bdesc, 0, hw_tag);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	if (!msc->mbuf)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

static int msc_win_set_lockout(struct msc_window *win,
			       enum lockout_state expect,
			       enum lockout_state new)
{
	enum lockout_state old;
	unsigned long flags;
	int ret = 0;

	if (!win->msc->mbuf)
		return 0;

	spin_lock_irqsave(&win->lo_lock, flags);
	old = win->lockout;

	if (old != expect) {
		ret = -EINVAL;
		goto unlock;
	}

	win->lockout = new;

	if (old == expect && new == WIN_LOCKED)
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);

	if (ret) {
		if (expect == WIN_READY && old == WIN_LOCKED)
			return -EBUSY;

		/* from intel_th_msc_window_unlock(), don't warn if not locked */
		if (expect == WIN_LOCKED && old == new)
			return 0;

		dev_warn_ratelimited(msc_dev(win->msc),
				     "expected lockout state %d, got %d\n",
				     expect, old);
	}

	return ret;
}
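
/*
 * In short, msc_win_set_lockout() is a guarded state transition: it
 * returns 0 when @win moved from @expect to @new (or when no buffer sink
 * is attached, in which case lockout isn't used at all), -EBUSY when the
 * caller expected READY but the sink still holds the window LOCKED, and
 * -EINVAL for any other unexpected state. Callers typically use it like
 * msc_configure() below:
 *
 *	if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
 *		return -EBUSY;	// window not released by the sink yet
 */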
/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 *
 * Return: %0 for success or a negative error code otherwise.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -EINVAL;

	if (msc->mode == MSC_MODE_MULTI) {
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
			return -EBUSY;

		msc_buffer_clear_hw_header(msc);
	}

	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
	msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	intel_th_msu_init(msc);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	if (msc->mbuf && msc->mbuf->activate)
		msc->mbuf->activate(msc->mbuf_priv);

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	struct msc_window *win = msc->cur_win;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_MULTI)
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	if (msc->mbuf && msc->mbuf->deactivate)
		msc->mbuf->deactivate(msc->mbuf_priv);
	intel_th_msu_deinit(msc);
	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	msc->enabled = 0;

	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	if (ret < 0)
		goto err_free_pages;

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return: page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

	sg_free_table(win->sgt);

	return -ENOMEM;
}
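
/*
 * Note the error path above: by the time err_nomem is reached, exactly
 * @i blocks have been allocated, so the cleanup loop re-walks only the
 * first @i entries of the table (reusing @ret as the loop counter)
 * before tearing down the sg_table itself.
 */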
#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc *msc)
{
	struct scatterlist *sg_ptr;
	struct msc_window *win;
	int i;

	if (msc->mode == MSC_MODE_SINGLE) {
		set_memory_uc((unsigned long)msc->base, msc->nr_pages);
		return;
	}

	list_for_each_entry(win, &msc->win_list, entry) {
		for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
			/* Set the page as uncached */
			set_memory_uc((unsigned long)sg_virt(sg_ptr),
				      PFN_DOWN(sg_ptr->length));
		}
	}
}

static void msc_buffer_set_wb(struct msc *msc)
{
	struct scatterlist *sg_ptr;
	struct msc_window *win;
	int i;

	if (msc->mode == MSC_MODE_SINGLE) {
		set_memory_wb((unsigned long)msc->base, msc->nr_pages);
		return;
	}

	list_for_each_entry(win, &msc->win_list, entry) {
		for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
			/* Reset the page to write-back */
			set_memory_wb((unsigned long)sg_virt(sg_ptr),
				      PFN_DOWN(sg_ptr->length));
		}
	}
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc *msc) {}
static inline void msc_buffer_set_wb(struct msc *msc) {}
#endif /* CONFIG_X86 */

static struct page *msc_sg_page(struct scatterlist *sg)
{
	void *addr = sg_virt(sg);

	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);

	return sg_page(sg);
}

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;
	win->lockout = WIN_READY;
	spin_lock_init(&win->lo_lock);

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	if (msc->mbuf && msc->mbuf->alloc_window)
		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
					      nr_blocks << PAGE_SHIFT);
	else
		ret = __msc_buffer_win_alloc(win, nr_blocks);

	if (ret <= 0)
		goto err_nomem;

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_base(win);
		msc->base_addr = msc_win_base_dma(win);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
		struct page *page = msc_sg_page(sg);

		page->mapping = NULL;
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg), sg_dma_address(sg));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);

	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	if (msc->mbuf && msc->mbuf->free_window)
		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
	else
		__msc_buffer_win_free(msc, win);

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::buf_mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		struct scatterlist *sg;
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}
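
/*
 * For illustration, with two windows of two blocks each, the relink pass
 * above produces this linkage (PFNs abbreviated to block names):
 *
 *	win0/blk0: next_blk = win0/blk1, next_win = win1/blk0
 *	win0/blk1: next_blk = win0/blk0, next_win = win1/blk0, LASTBLK
 *	win1/blk0: next_blk = win1/blk1, next_win = win0/blk0, LASTWIN
 *	win1/blk1: next_blk = win1/blk0, next_win = win0/blk0, LASTWIN|LASTBLK
 */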
static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	msc_buffer_set_wb(msc);

	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	number of pages for each window
 * @nr_wins:	number of windows
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode, it will be
 * either done via msc_buffer_contig_alloc() for SINGLE operation mode or
 * msc_buffer_win_alloc() for multiblock operation. The latter allocates one
 * window per invocation, so in multiblock mode this can be called multiple
 * times for the same MSC to allocate multiple windows.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		msc_buffer_set_uc(msc);

		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}
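
/*
 * To summarize the msc::user_count life cycle used above and below: it
 * starts at -1 (no buffer); msc_buffer_alloc() moves it to 0 via cmpxchg
 * once the buffer is usable; readers, mmap()ers and the activation path
 * then take references with atomic_inc_unless_negative(), which fails
 * while no buffer exists; freeing only succeeds when the count can be
 * cmpxchg'ed from 0 back to -1, i.e. when the last reference is gone.
 */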
/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return: 0 on successful deallocation or if there was no buffer to
 * deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 *
 * Return: 0 on successful deallocation or if there was no buffer to
 * deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return: page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	struct scatterlist *sg;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct page *page = msc_sg_page(sg);
		size_t pgsz = PFN_DOWN(sg->length);

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 *
 * Return: number of bytes that could not be copied (zero on full success),
 * following the copy_to_user() convention.
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -EINVAL;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */
static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
	vma->vm_ops = &msc_mmap_ops;

	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.owner		= THIS_MODULE,
};
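
/*
 * From userspace, the resulting character device is consumed roughly like
 * this (the device path is illustrative and depends on the Trace Hub
 * instance and MSC index; the mmap() length must match the allocated
 * buffer size exactly, since intel_th_msc_mmap() rejects anything else):
 *
 *	int fd = open("/dev/intel_th0-msc0", O_RDONLY);
 *	size_t bufsz = nr_pages * sysconf(_SC_PAGESIZE);
 *	void *buf = mmap(NULL, bufsz, PROT_READ, MAP_SHARED, fd, 0);
 *	// or simply: read(fd, mybuf, mylen) to stream the trace data out
 */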
  1358. static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
  1359. {
  1360. struct msc *msc = dev_get_drvdata(&thdev->dev);
  1361. unsigned long count;
  1362. u32 reg;
  1363. for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
  1364. count && !(reg & MSCSTS_PLE); count--) {
  1365. reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
  1366. cpu_relax();
  1367. }
  1368. if (!count)
  1369. dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
  1370. }
  1371. static int intel_th_msc_init(struct msc *msc)
  1372. {
  1373. atomic_set(&msc->user_count, -1);
  1374. msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
  1375. mutex_init(&msc->buf_mutex);
  1376. INIT_LIST_HEAD(&msc->win_list);
  1377. INIT_LIST_HEAD(&msc->iter_list);
  1378. msc->burst_len =
  1379. (ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
  1380. __ffs(MSC_LEN);
  1381. return 0;
  1382. }
  1383. static int msc_win_switch(struct msc *msc)
  1384. {
  1385. struct msc_window *first;
  1386. if (list_empty(&msc->win_list))
  1387. return -EINVAL;
  1388. first = list_first_entry(&msc->win_list, struct msc_window, entry);
  1389. if (msc_is_last_win(msc->cur_win))
  1390. msc->cur_win = first;
  1391. else
  1392. msc->cur_win = list_next_entry(msc->cur_win, entry);
  1393. msc->base = msc_win_base(msc->cur_win);
  1394. msc->base_addr = msc_win_base_dma(msc->cur_win);
  1395. intel_th_trace_switch(msc->thdev);
  1396. return 0;
  1397. }
  1398. /**
  1399. * intel_th_msc_window_unlock - put the window back in rotation
  1400. * @dev: MSC device to which this relates
  1401. * @sgt: buffer's sg_table for the window, does nothing if NULL
  1402. */
  1403. void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
  1404. {
  1405. struct msc *msc = dev_get_drvdata(dev);
  1406. struct msc_window *win;
  1407. if (!sgt)
  1408. return;
  1409. win = msc_find_window(msc, sgt, false);
  1410. if (!win)
  1411. return;
  1412. msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
  1413. if (msc->switch_on_unlock == win) {
  1414. msc->switch_on_unlock = NULL;
  1415. msc_win_switch(msc);
  1416. }
  1417. }
  1418. EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
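
/*
 * A sketch of how a buffer sink ties ->ready() to the unlock call above;
 * the deferred-work shape is illustrative. ->ready() hands the sink a
 * LOCKED window; the sink consumes it at its own pace and then puts it
 * back in rotation, using the MSC device pointer it received in its
 * ->assign() callback:
 *
 *	static int example_ready(void *priv, struct sg_table *sgt,
 *				 size_t bytes)
 *	{
 *		// stash sgt, kick consumption off in process context
 *		...
 *	}
 *
 *	// later, once the window's data has been consumed:
 *	intel_th_msc_window_unlock(example->msc_dev, sgt);
 */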
  1419. static void msc_work(struct work_struct *work)
  1420. {
  1421. struct msc *msc = container_of(work, struct msc, work);
  1422. intel_th_msc_deactivate(msc->thdev);
  1423. }
static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	struct msc_window *win, *next_win;

	if (!msc->do_irq || !msc->mbuf)
		return IRQ_NONE;

	msusts &= mask;

	if (!msusts)
		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;

	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	if (!msc->enabled)
		return IRQ_NONE;

	/* grab the window before we do the switch */
	win = msc->cur_win;
	if (!win)
		return IRQ_HANDLED;
	next_win = msc_next_window(win);
	if (!next_win)
		return IRQ_HANDLED;

	/* next window: if READY, proceed, if LOCKED, stop the trace */
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		if (msc->stop_on_full)
			schedule_work(&msc->work);
		else
			msc->switch_on_unlock = next_win;

		return IRQ_HANDLED;
	}

	/* current window: INUSE -> LOCKED */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	msc_win_switch(msc);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	return IRQ_HANDLED;
}
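/* Mode names exposed through the "mode" sysfs attribute. */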
static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};
static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);
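/* Detach the external buffer sink, if any, and drop its module reference. */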
static void msc_buffer_unassign(struct msc *msc)
{
	lockdep_assert_held(&msc->buf_mutex);

	if (!msc->mbuf)
		return;

	msc->mbuf->unassign(msc->mbuf_priv);
	msu_buffer_put(msc->mbuf);
	msc->mbuf_priv = NULL;
	msc->mbuf = NULL;
}
static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	const char *mode = msc_mode[msc->mode];
	ssize_t ret;

	mutex_lock(&msc->buf_mutex);
	if (msc->mbuf)
		mode = msc->mbuf->name;
	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}
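/*
 * Writing "mode" selects either one of the built-in modes in msc_mode[]
 * or, failing that, a registered external buffer sink by name; sinks
 * additionally require a usable IRQ (msc->do_irq).
 */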
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	if (i == MSC_MODE_MULTI && msc->multi_is_broken)
		return -EOPNOTSUPP;

	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);
static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}
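/*
 * Writing "nr_pages" takes a comma-separated list of window sizes in
 * pages and reallocates the buffer accordingly; more than one window
 * is only accepted in multi mode, and the buffer must not be in use.
 */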
static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc_array(win, nr_wins, sizeof(*win), GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);
static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	ret = -EINVAL;
	mutex_lock(&msc->buf_mutex);
	/*
	 * Window switches can only happen in the "multi" mode.
	 * If an external buffer is engaged, it has full control
	 * over window switching.
	 */
	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
		ret = msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);
static ssize_t stop_on_full_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", msc->stop_on_full);
}

static ssize_t stop_on_full_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	int ret;

	ret = kstrtobool(buf, &msc->stop_on_full);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR_RW(stop_on_full);
static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	&dev_attr_stop_on_full.attr,
	NULL,
};

static const struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};
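/*
 * Each MSC instance gets its own 0x100-byte register window off the
 * common MSU base.  Note that do_irq is set when *no* IRQ resource is
 * found at index 1; it is what gates the interrupt handler above.
 */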
static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
		msc->multi_is_broken = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}
static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}
static struct intel_th_driver intel_th_msc_driver = {
	.probe		= intel_th_msc_probe,
	.remove		= intel_th_msc_remove,
	.irq		= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops		= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");