  1. // SPDX-License-Identifier: GPL-2.0-only
  2. // Copyright (C) 2022 Linutronix GmbH, John Ogness
  3. // Copyright (C) 2022 Intel, Thomas Gleixner
  4. #include <linux/atomic.h>
  5. #include <linux/bug.h>
  6. #include <linux/console.h>
  7. #include <linux/delay.h>
  8. #include <linux/errno.h>
  9. #include <linux/export.h>
  10. #include <linux/init.h>
  11. #include <linux/irqflags.h>
  12. #include <linux/kthread.h>
  13. #include <linux/minmax.h>
  14. #include <linux/percpu.h>
  15. #include <linux/preempt.h>
  16. #include <linux/slab.h>
  17. #include <linux/smp.h>
  18. #include <linux/stddef.h>
  19. #include <linux/string.h>
  20. #include <linux/types.h>
  21. #include "internal.h"
  22. #include "printk_ringbuffer.h"
  23. /*
 * Printk console printing implementation for consoles that do not depend
  25. * on the legacy style console_lock mechanism.
  26. *
  27. * The state of the console is maintained in the "nbcon_state" atomic
  28. * variable.
  29. *
  30. * The console is locked when:
  31. *
  32. * - The 'prio' field contains the priority of the context that owns the
  33. * console. Only higher priority contexts are allowed to take over the
  34. * lock. A value of 0 (NBCON_PRIO_NONE) means the console is not locked.
  35. *
  36. * - The 'cpu' field denotes on which CPU the console is locked. It is used
  37. * to prevent busy waiting on the same CPU. Also it informs the lock owner
  38. * that it has lost the lock in a more complex scenario when the lock was
  39. * taken over by a higher priority context, released, and taken on another
  40. * CPU with the same priority as the interrupted owner.
  41. *
  42. * The acquire mechanism uses a few more fields:
  43. *
  44. * - The 'req_prio' field is used by the handover approach to make the
  45. * current owner aware that there is a context with a higher priority
  46. * waiting for the friendly handover.
  47. *
  48. * - The 'unsafe' field allows to take over the console in a safe way in the
  49. * middle of emitting a message. The field is set only when accessing some
  50. * shared resources or when the console device is manipulated. It can be
  51. * cleared, for example, after emitting one character when the console
  52. * device is in a consistent state.
  53. *
  54. * - The 'unsafe_takeover' field is set when a hostile takeover took the
  55. * console in an unsafe state. The console will stay in the unsafe state
  56. * until re-initialized.
  57. *
  58. * The acquire mechanism uses three approaches:
  59. *
  60. * 1) Direct acquire when the console is not owned or is owned by a lower
  61. * priority context and is in a safe state.
  62. *
  63. * 2) Friendly handover mechanism uses a request/grant handshake. It is used
  64. * when the current owner has lower priority and the console is in an
  65. * unsafe state.
  66. *
  67. * The requesting context:
  68. *
  69. * a) Sets its priority into the 'req_prio' field.
  70. *
  71. * b) Waits (with a timeout) for the owning context to unlock the
  72. * console.
  73. *
  74. * c) Takes the lock and clears the 'req_prio' field.
  75. *
  76. * The owning context:
  77. *
  78. * a) Observes the 'req_prio' field set on exit from the unsafe
  79. * console state.
  80. *
  81. * b) Gives up console ownership by clearing the 'prio' field.
  82. *
  83. * 3) Unsafe hostile takeover allows to take over the lock even when the
 * console is in an unsafe state. It is used only in panic() by the final
  85. * attempt to flush consoles in a try and hope mode.
  86. *
  87. * Note that separate record buffers are used in panic(). As a result,
  88. * the messages can be read and formatted without any risk even after
  89. * using the hostile takeover in unsafe state.
  90. *
  91. * The release function simply clears the 'prio' field.
  92. *
  93. * All operations on @console::nbcon_state are atomic cmpxchg based to
  94. * handle concurrency.
  95. *
  96. * The acquire/release functions implement only minimal policies:
  97. *
  98. * - Preference for higher priority contexts.
  99. * - Protection of the panic CPU.
  100. *
  101. * All other policy decisions must be made at the call sites:
  102. *
  103. * - What is marked as an unsafe section.
  104. * - Whether to spin-wait if there is already an owner and the console is
  105. * in an unsafe state.
  106. * - Whether to attempt an unsafe hostile takeover.
  107. *
  108. * The design allows to implement the well known:
  109. *
  110. * acquire()
  111. * output_one_printk_record()
  112. * release()
  113. *
  114. * The output of one printk record might be interrupted with a higher priority
  115. * context. The new owner is supposed to reprint the entire interrupted record
  116. * from scratch.
  117. */
  118. /**
  119. * nbcon_state_set - Helper function to set the console state
  120. * @con: Console to update
  121. * @new: The new state to write
  122. *
  123. * Only to be used when the console is not yet or no longer visible in the
  124. * system. Otherwise use nbcon_state_try_cmpxchg().
  125. */
  126. static inline void nbcon_state_set(struct console *con, struct nbcon_state *new)
  127. {
  128. atomic_set(&ACCESS_PRIVATE(con, nbcon_state), new->atom);
  129. }
  130. /**
  131. * nbcon_state_read - Helper function to read the console state
  132. * @con: Console to read
  133. * @state: The state to store the result
  134. */
  135. static inline void nbcon_state_read(struct console *con, struct nbcon_state *state)
  136. {
  137. state->atom = atomic_read(&ACCESS_PRIVATE(con, nbcon_state));
  138. }
  139. /**
  140. * nbcon_state_try_cmpxchg() - Helper function for atomic_try_cmpxchg() on console state
  141. * @con: Console to update
  142. * @cur: Old/expected state
  143. * @new: New state
  144. *
  145. * Return: True on success. False on fail and @cur is updated.
  146. */
  147. static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
  148. struct nbcon_state *new)
  149. {
  150. return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
  151. }
  152. /**
  153. * nbcon_seq_read - Read the current console sequence
  154. * @con: Console to read the sequence of
  155. *
  156. * Return: Sequence number of the next record to print on @con.
  157. */
  158. u64 nbcon_seq_read(struct console *con)
  159. {
  160. unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));
  161. return __ulseq_to_u64seq(prb, nbcon_seq);
  162. }
  163. /**
  164. * nbcon_seq_force - Force console sequence to a specific value
  165. * @con: Console to work on
  166. * @seq: Sequence number value to set
  167. *
  168. * Only to be used during init (before registration) or in extreme situations
  169. * (such as panic with CONSOLE_REPLAY_ALL).
  170. */
  171. void nbcon_seq_force(struct console *con, u64 seq)
  172. {
  173. /*
  174. * If the specified record no longer exists, the oldest available record
  175. * is chosen. This is especially important on 32bit systems because only
  176. * the lower 32 bits of the sequence number are stored. The upper 32 bits
  177. * are derived from the sequence numbers available in the ringbuffer.
  178. */
  179. u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
  180. atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
  181. }
  182. /**
  183. * nbcon_seq_try_update - Try to update the console sequence number
  184. * @ctxt: Pointer to an acquire context that contains
  185. * all information about the acquire mode
  186. * @new_seq: The new sequence number to set
  187. *
  188. * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
  189. * the 64bit value). This could be a different value than @new_seq if
  190. * nbcon_seq_force() was used or the current context no longer owns the
  191. * console. In the later case, it will stop printing anyway.
  192. */
  193. static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
  194. {
  195. unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);
  196. struct console *con = ctxt->console;
  197. if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
  198. __u64seq_to_ulseq(new_seq))) {
  199. ctxt->seq = new_seq;
  200. } else {
  201. ctxt->seq = nbcon_seq_read(con);
  202. }
  203. }
/**
 * nbcon_context_try_acquire_direct - Try to acquire directly
 * @ctxt:	The context of the caller
 * @cur:	The current console state
 *
 * Acquire the console when it is released. Also acquire the console when
 * the current owner has a lower priority and the console is in a safe state.
 *
 * Return:	0 on success. Otherwise, an error code on failure. Also @cur
 *		is updated to the latest state when failed to modify it.
 *
 * Errors:
 *
 *	-EPERM:		A panic is in progress and this is not the panic CPU.
 *			Or the current owner or waiter has the same or higher
 *			priority. No acquire method can be successful in
 *			this case.
 *
 *	-EBUSY:		The current owner has a lower priority but the console
 *			in an unsafe state. The caller should try using
 *			the handover acquire method.
 */
static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
					    struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;

	/* Retry the cmpxchg until it succeeds or a guard bails out. */
	do {
		/*
		 * Panic does not imply that the console is owned. However, it
		 * is critical that non-panic CPUs during panic are unable to
		 * acquire ownership in order to satisfy the assumptions of
		 * nbcon_waiter_matches(). In particular, the assumption that
		 * lower priorities are ignored during panic.
		 */
		if (other_cpu_in_panic())
			return -EPERM;

		/* Equal or higher priority owner/waiter: direct acquire never allowed. */
		if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
			return -EPERM;

		/* Unsafe state: the caller must fall back to the handover method. */
		if (cur->unsafe)
			return -EBUSY;

		/*
		 * The console should never be safe for a direct acquire
		 * if an unsafe hostile takeover has ever happened.
		 */
		WARN_ON_ONCE(cur->unsafe_takeover);

		new.atom = cur->atom;
		new.prio	= ctxt->prio;
		new.req_prio	= NBCON_PRIO_NONE;
		/* A permanently-unsafe console (unsafe_takeover) stays unsafe. */
		new.unsafe	= cur->unsafe_takeover;
		new.cpu		= cpu;

	} while (!nbcon_state_try_cmpxchg(con, cur, &new));

	return 0;
}
  259. static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
  260. {
  261. /*
  262. * The request context is well defined by the @req_prio because:
  263. *
  264. * - Only a context with a priority higher than the owner can become
  265. * a waiter.
  266. * - Only a context with a priority higher than the waiter can
  267. * directly take over the request.
  268. * - There are only three priorities.
  269. * - Only one CPU is allowed to request PANIC priority.
  270. * - Lower priorities are ignored during panic() until reboot.
  271. *
  272. * As a result, the following scenario is *not* possible:
  273. *
  274. * 1. This context is currently a waiter.
  275. * 2. Another context with a higher priority than this context
  276. * directly takes ownership.
  277. * 3. The higher priority context releases the ownership.
  278. * 4. Another lower priority context takes the ownership.
  279. * 5. Another context with the same priority as this context
  280. * creates a request and starts waiting.
  281. *
  282. * Event #1 implies this context is EMERGENCY.
  283. * Event #2 implies the new context is PANIC.
  284. * Event #3 occurs when panic() has flushed the console.
  285. * Events #4 and #5 are not possible due to the other_cpu_in_panic()
  286. * check in nbcon_context_try_acquire_direct().
  287. */
  288. return (cur->req_prio == expected_prio);
  289. }
/**
 * nbcon_context_try_acquire_requested - Try to acquire after having
 *					 requested a handover
 * @ctxt:	The context of the caller
 * @cur:	The current console state
 *
 * This is a helper function for nbcon_context_try_acquire_handover().
 * It is called when the console is in an unsafe state. The current
 * owner will release the console on exit from the unsafe region.
 *
 * Return:	0 on success and @cur is updated to the new console state.
 *		Otherwise an error code on failure.
 *
 * Errors:
 *
 *	-EPERM:		A panic is in progress and this is not the panic CPU
 *			or this context is no longer the waiter.
 *
 *	-EBUSY:		The console is still locked. The caller should
 *			continue waiting.
 *
 * Note: The caller must still remove the request when an error has occurred
 *	 except when this context is no longer the waiter.
 */
static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
					       struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;

	/* Note that the caller must still remove the request! */
	if (other_cpu_in_panic())
		return -EPERM;

	/*
	 * Note that the waiter will also change if there was an unsafe
	 * hostile takeover.
	 */
	if (!nbcon_waiter_matches(cur, ctxt->prio))
		return -EPERM;

	/* If still locked, caller should continue waiting. */
	if (cur->prio != NBCON_PRIO_NONE)
		return -EBUSY;

	/*
	 * The previous owner should have never released ownership
	 * in an unsafe region.
	 */
	WARN_ON_ONCE(cur->unsafe);

	/* Claim ownership and clear the handover request atomically. */
	new.atom = cur->atom;
	new.prio	= ctxt->prio;
	new.req_prio	= NBCON_PRIO_NONE;
	/* A permanently-unsafe console (unsafe_takeover) stays unsafe. */
	new.unsafe	= cur->unsafe_takeover;
	new.cpu		= cpu;

	if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
		/*
		 * The acquire could fail only when it has been taken
		 * over by a higher priority context.
		 */
		WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
		return -EPERM;
	}

	/* Handover success. This context now owns the console. */
	return 0;
}
/**
 * nbcon_context_try_acquire_handover - Try to acquire via handover
 * @ctxt:	The context of the caller
 * @cur:	The current console state
 *
 * The function must be called only when the context has higher priority
 * than the current owner and the console is in an unsafe state.
 * It is the case when nbcon_context_try_acquire_direct() returns -EBUSY.
 *
 * The function sets "req_prio" field to make the current owner aware of
 * the request. Then it waits until the current owner releases the console,
 * or an even higher context takes over the request, or timeout expires.
 *
 * The current owner checks the "req_prio" field on exit from the unsafe
 * region and releases the console. It does not touch the "req_prio" field
 * so that the console stays reserved for the waiter.
 *
 * Return:	0 on success. Otherwise, an error code on failure. Also @cur
 *		is updated to the latest state when failed to modify it.
 *
 * Errors:
 *
 *	-EPERM:		A panic is in progress and this is not the panic CPU.
 *			Or a higher priority context has taken over the
 *			console or the handover request.
 *
 *	-EBUSY:		The current owner is on the same CPU so that the hand
 *			shake could not work. Or the current owner is not
 *			willing to wait (zero timeout). Or the console does
 *			not enter the safe state before timeout passed. The
 *			caller might still use the unsafe hostile takeover
 *			when allowed.
 *
 *	-EAGAIN:	@cur has changed when creating the handover request.
 *			The caller should retry with direct acquire.
 */
static int nbcon_context_try_acquire_handover(struct nbcon_context *ctxt,
					      struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state new;
	int timeout;
	int request_err = -EBUSY;

	/*
	 * Check that the handover is called when the direct acquire failed
	 * with -EBUSY.
	 */
	WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
	WARN_ON_ONCE(!cur->unsafe);

	/* Handover is not possible on the same CPU. */
	if (cur->cpu == cpu)
		return -EBUSY;

	/*
	 * Console stays unsafe after an unsafe takeover until re-initialized.
	 * Waiting is not going to help in this case.
	 */
	if (cur->unsafe_takeover)
		return -EBUSY;

	/* Is the caller willing to wait? */
	if (ctxt->spinwait_max_us == 0)
		return -EBUSY;

	/*
	 * Setup a request for the handover. The caller should try to acquire
	 * the console directly when the current state has been modified.
	 */
	new.atom = cur->atom;
	new.req_prio = ctxt->prio;
	if (!nbcon_state_try_cmpxchg(con, cur, &new))
		return -EAGAIN;

	cur->atom = new.atom;

	/* Wait until there is no owner and then acquire the console. */
	for (timeout = ctxt->spinwait_max_us; timeout >= 0; timeout--) {
		/* On successful acquire, this request is cleared. */
		request_err = nbcon_context_try_acquire_requested(ctxt, cur);
		if (!request_err)
			return 0;

		/*
		 * If the acquire should be aborted, it must be ensured
		 * that the request is removed before returning to caller.
		 */
		if (request_err == -EPERM)
			break;

		udelay(1);

		/* Re-read the state because some time has passed. */
		nbcon_state_read(con, cur);
	}

	/* Timed out or aborted. Carefully remove handover request. */
	do {
		/*
		 * No need to remove request if there is a new waiter. This
		 * can only happen if a higher priority context has taken over
		 * the console or the handover request.
		 */
		if (!nbcon_waiter_matches(cur, ctxt->prio))
			return -EPERM;

		/* Unset request for handover. */
		new.atom = cur->atom;
		new.req_prio = NBCON_PRIO_NONE;
		if (nbcon_state_try_cmpxchg(con, cur, &new)) {
			/*
			 * Request successfully unset. Report failure of
			 * acquiring via handover.
			 */
			cur->atom = new.atom;
			return request_err;
		}

		/*
		 * Unable to remove request. Try to acquire in case
		 * the owner has released the lock.
		 */
	} while (nbcon_context_try_acquire_requested(ctxt, cur));

	/* Lucky timing. The acquire succeeded while removing the request. */
	return 0;
}
  468. /**
  469. * nbcon_context_try_acquire_hostile - Acquire via unsafe hostile takeover
  470. * @ctxt: The context of the caller
  471. * @cur: The current console state
  472. *
  473. * Acquire the console even in the unsafe state.
  474. *
  475. * It can be permitted by setting the 'allow_unsafe_takeover' field only
  476. * by the final attempt to flush messages in panic().
  477. *
  478. * Return: 0 on success. -EPERM when not allowed by the context.
  479. */
  480. static int nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt,
  481. struct nbcon_state *cur)
  482. {
  483. unsigned int cpu = smp_processor_id();
  484. struct console *con = ctxt->console;
  485. struct nbcon_state new;
  486. if (!ctxt->allow_unsafe_takeover)
  487. return -EPERM;
  488. /* Ensure caller is allowed to perform unsafe hostile takeovers. */
  489. if (WARN_ON_ONCE(ctxt->prio != NBCON_PRIO_PANIC))
  490. return -EPERM;
  491. /*
  492. * Check that try_acquire_direct() and try_acquire_handover() returned
  493. * -EBUSY in the right situation.
  494. */
  495. WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
  496. WARN_ON_ONCE(cur->unsafe != true);
  497. do {
  498. new.atom = cur->atom;
  499. new.cpu = cpu;
  500. new.prio = ctxt->prio;
  501. new.unsafe |= cur->unsafe_takeover;
  502. new.unsafe_takeover |= cur->unsafe;
  503. } while (!nbcon_state_try_cmpxchg(con, cur, &new));
  504. return 0;
  505. }
/*
 * Dedicated formatting buffers used instead of @con->pbufs when the current
 * CPU is the panic CPU (see nbcon_context_try_acquire()).
 */
static struct printk_buffers panic_nbcon_pbufs;
  507. /**
  508. * nbcon_context_try_acquire - Try to acquire nbcon console
  509. * @ctxt: The context of the caller
  510. *
  511. * Context: Under @ctxt->con->device_lock() or local_irq_save().
  512. * Return: True if the console was acquired. False otherwise.
  513. *
  514. * If the caller allowed an unsafe hostile takeover, on success the
  515. * caller should check the current console state to see if it is
  516. * in an unsafe state. Otherwise, on success the caller may assume
  517. * the console is not in an unsafe state.
  518. */
  519. static bool nbcon_context_try_acquire(struct nbcon_context *ctxt)
  520. {
  521. unsigned int cpu = smp_processor_id();
  522. struct console *con = ctxt->console;
  523. struct nbcon_state cur;
  524. int err;
  525. nbcon_state_read(con, &cur);
  526. try_again:
  527. err = nbcon_context_try_acquire_direct(ctxt, &cur);
  528. if (err != -EBUSY)
  529. goto out;
  530. err = nbcon_context_try_acquire_handover(ctxt, &cur);
  531. if (err == -EAGAIN)
  532. goto try_again;
  533. if (err != -EBUSY)
  534. goto out;
  535. err = nbcon_context_try_acquire_hostile(ctxt, &cur);
  536. out:
  537. if (err)
  538. return false;
  539. /* Acquire succeeded. */
  540. /* Assign the appropriate buffer for this context. */
  541. if (atomic_read(&panic_cpu) == cpu)
  542. ctxt->pbufs = &panic_nbcon_pbufs;
  543. else
  544. ctxt->pbufs = con->pbufs;
  545. /* Set the record sequence for this context to print. */
  546. ctxt->seq = nbcon_seq_read(ctxt->console);
  547. return true;
  548. }
  549. static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
  550. int expected_prio)
  551. {
  552. /*
  553. * A similar function, nbcon_waiter_matches(), only deals with
  554. * EMERGENCY and PANIC priorities. However, this function must also
  555. * deal with the NORMAL priority, which requires additional checks
  556. * and constraints.
  557. *
  558. * For the case where preemption and interrupts are disabled, it is
  559. * enough to also verify that the owning CPU has not changed.
  560. *
  561. * For the case where preemption or interrupts are enabled, an
  562. * external synchronization method *must* be used. In particular,
  563. * the driver-specific locking mechanism used in device_lock()
  564. * (including disabling migration) should be used. It prevents
  565. * scenarios such as:
  566. *
  567. * 1. [Task A] owns a context with NBCON_PRIO_NORMAL on [CPU X] and
  568. * is scheduled out.
  569. * 2. Another context takes over the lock with NBCON_PRIO_EMERGENCY
  570. * and releases it.
  571. * 3. [Task B] acquires a context with NBCON_PRIO_NORMAL on [CPU X]
  572. * and is scheduled out.
  573. * 4. [Task A] gets running on [CPU X] and sees that the console is
  574. * still owned by a task on [CPU X] with NBON_PRIO_NORMAL. Thus
  575. * [Task A] thinks it is the owner when it is not.
  576. */
  577. if (cur->prio != expected_prio)
  578. return false;
  579. if (cur->cpu != expected_cpu)
  580. return false;
  581. return true;
  582. }
/**
 * nbcon_context_release - Release the console
 * @ctxt:	The nbcon context from nbcon_context_try_acquire()
 *
 * A no-op when this context no longer owns the console (ownership was
 * handed over or taken by a higher priority context).
 */
static void nbcon_context_release(struct nbcon_context *ctxt)
{
	unsigned int cpu = smp_processor_id();
	struct console *con = ctxt->console;
	struct nbcon_state cur;
	struct nbcon_state new;

	nbcon_state_read(con, &cur);

	do {
		/* Nothing to release if this context is no longer the owner. */
		if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
			break;

		new.atom = cur.atom;
		new.prio = NBCON_PRIO_NONE;

		/*
		 * If @unsafe_takeover is set, it is kept set so that
		 * the state remains permanently unsafe.
		 */
		new.unsafe |= cur.unsafe_takeover;

	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));

	ctxt->pbufs = NULL;
}
/**
 * nbcon_context_can_proceed - Check whether ownership can proceed
 * @ctxt:	The nbcon context from nbcon_context_try_acquire()
 * @cur:	The current console state
 *
 * Return:	True if this context still owns the console. False if
 *		ownership was handed over or taken.
 *
 * Must be invoked when entering the unsafe state to make sure that it still
 * owns the lock. Also must be invoked when exiting the unsafe context
 * to eventually free the lock for a higher priority context which asked
 * for the friendly handover.
 *
 * It can be called inside an unsafe section when the console is just
 * temporary in safe state instead of exiting and entering the unsafe
 * state.
 *
 * Also it can be called in the safe context before doing an expensive
 * safe operation. It does not make sense to do the operation when
 * a higher priority context took the lock.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context.
 */
static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
{
	unsigned int cpu = smp_processor_id();

	/* Make sure this context still owns the console. */
	if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
		return false;

	/* The console owner can proceed if there is no waiter. */
	if (cur->req_prio == NBCON_PRIO_NONE)
		return true;

	/*
	 * A console owner within an unsafe region is always allowed to
	 * proceed, even if there are waiters. It can perform a handover
	 * when exiting the unsafe region. Otherwise the waiter will
	 * need to perform an unsafe hostile takeover.
	 */
	if (cur->unsafe)
		return true;

	/* Waiters always have higher priorities than owners. */
	WARN_ON_ONCE(cur->req_prio <= cur->prio);

	/*
	 * Having a safe point for take over and eventually a few
	 * duplicated characters or a full line is way better than a
	 * hostile takeover. Post processing can take care of the garbage.
	 * Release and hand over.
	 */
	nbcon_context_release(ctxt);

	/*
	 * It is not clear whether the waiter really took over ownership. The
	 * outermost callsite must make the final decision whether console
	 * ownership is needed for it to proceed. If yes, it must reacquire
	 * ownership (possibly hostile) before carefully proceeding.
	 *
	 * The calling context no longer owns the console so go back all the
	 * way instead of trying to implement reacquire heuristics in tons of
	 * places.
	 */
	return false;
}
  671. /**
  672. * nbcon_can_proceed - Check whether ownership can proceed
  673. * @wctxt: The write context that was handed to the write function
  674. *
  675. * Return: True if this context still owns the console. False if
  676. * ownership was handed over or taken.
  677. *
  678. * It is used in nbcon_enter_unsafe() to make sure that it still owns the
  679. * lock. Also it is used in nbcon_exit_unsafe() to eventually free the lock
  680. * for a higher priority context which asked for the friendly handover.
  681. *
  682. * It can be called inside an unsafe section when the console is just
  683. * temporary in safe state instead of exiting and entering the unsafe state.
  684. *
  685. * Also it can be called in the safe context before doing an expensive safe
  686. * operation. It does not make sense to do the operation when a higher
  687. * priority context took the lock.
  688. *
  689. * When this function returns false then the calling context no longer owns
  690. * the console and is no longer allowed to go forward. In this case it must
  691. * back out immediately and carefully. The buffer content is also no longer
  692. * trusted since it no longer belongs to the calling context.
  693. */
  694. bool nbcon_can_proceed(struct nbcon_write_context *wctxt)
  695. {
  696. struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
  697. struct console *con = ctxt->console;
  698. struct nbcon_state cur;
  699. nbcon_state_read(con, &cur);
  700. return nbcon_context_can_proceed(ctxt, &cur);
  701. }
  702. EXPORT_SYMBOL_GPL(nbcon_can_proceed);
#define nbcon_context_enter_unsafe(c)	__nbcon_context_update_unsafe(c, true)
#define nbcon_context_exit_unsafe(c)	__nbcon_context_update_unsafe(c, false)

/**
 * __nbcon_context_update_unsafe - Update the unsafe bit in @con->nbcon_state
 * @ctxt:	The nbcon context from nbcon_context_try_acquire()
 * @unsafe:	The new value for the unsafe bit
 *
 * Return:	True if the unsafe state was updated and this context still
 *		owns the console. Otherwise false if ownership was handed
 *		over or taken.
 *
 * This function allows console owners to modify the unsafe status of the
 * console.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context.
 *
 * Internal helper to avoid duplicated code.
 */
static bool __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
{
	struct console *con = ctxt->console;
	struct nbcon_state cur;
	struct nbcon_state new;

	nbcon_state_read(con, &cur);

	do {
		/*
		 * The unsafe bit must not be cleared if an
		 * unsafe hostile takeover has occurred.
		 */
		if (!unsafe && cur.unsafe_takeover)
			goto out;

		if (!nbcon_context_can_proceed(ctxt, &cur))
			return false;

		new.atom = cur.atom;
		new.unsafe = unsafe;
	} while (!nbcon_state_try_cmpxchg(con, &cur, &new));

	/* Use the successfully written state for the final ownership check. */
	cur.atom = new.atom;
out:
	return nbcon_context_can_proceed(ctxt, &cur);
}
  746. static void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
  747. char *buf, unsigned int len)
  748. {
  749. struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
  750. struct console *con = ctxt->console;
  751. struct nbcon_state cur;
  752. wctxt->outbuf = buf;
  753. wctxt->len = len;
  754. nbcon_state_read(con, &cur);
  755. wctxt->unsafe_takeover = cur.unsafe_takeover;
  756. }
  757. /**
  758. * nbcon_enter_unsafe - Enter an unsafe region in the driver
  759. * @wctxt: The write context that was handed to the write function
  760. *
  761. * Return: True if this context still owns the console. False if
  762. * ownership was handed over or taken.
  763. *
  764. * When this function returns false then the calling context no longer owns
  765. * the console and is no longer allowed to go forward. In this case it must
  766. * back out immediately and carefully. The buffer content is also no longer
  767. * trusted since it no longer belongs to the calling context.
  768. */
  769. bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt)
  770. {
  771. struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
  772. bool is_owner;
  773. is_owner = nbcon_context_enter_unsafe(ctxt);
  774. if (!is_owner)
  775. nbcon_write_context_set_buf(wctxt, NULL, 0);
  776. return is_owner;
  777. }
  778. EXPORT_SYMBOL_GPL(nbcon_enter_unsafe);
  779. /**
  780. * nbcon_exit_unsafe - Exit an unsafe region in the driver
  781. * @wctxt: The write context that was handed to the write function
  782. *
  783. * Return: True if this context still owns the console. False if
  784. * ownership was handed over or taken.
  785. *
  786. * When this function returns false then the calling context no longer owns
  787. * the console and is no longer allowed to go forward. In this case it must
  788. * back out immediately and carefully. The buffer content is also no longer
  789. * trusted since it no longer belongs to the calling context.
  790. */
  791. bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt)
  792. {
  793. struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
  794. bool ret;
  795. ret = nbcon_context_exit_unsafe(ctxt);
  796. if (!ret)
  797. nbcon_write_context_set_buf(wctxt, NULL, 0);
  798. return ret;
  799. }
  800. EXPORT_SYMBOL_GPL(nbcon_exit_unsafe);
  801. /**
  802. * nbcon_reacquire_nobuf - Reacquire a console after losing ownership
  803. * while printing
  804. * @wctxt: The write context that was handed to the write callback
  805. *
  806. * Since ownership can be lost at any time due to handover or takeover, a
  807. * printing context _must_ be prepared to back out immediately and
  808. * carefully. However, there are scenarios where the printing context must
  809. * reacquire ownership in order to finalize or revert hardware changes.
  810. *
  811. * This function allows a printing context to reacquire ownership using the
  812. * same priority as its previous ownership.
  813. *
  814. * Note that after a successful reacquire the printing context will have no
  815. * output buffer because that has been lost. This function cannot be used to
  816. * resume printing.
  817. */
  818. void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt)
  819. {
  820. struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
  821. while (!nbcon_context_try_acquire(ctxt))
  822. cpu_relax();
  823. nbcon_write_context_set_buf(wctxt, NULL, 0);
  824. }
  825. EXPORT_SYMBOL_GPL(nbcon_reacquire_nobuf);
/**
 * nbcon_emit_next_record - Emit a record in the acquired context
 * @wctxt:	The write context that will be handed to the write function
 * @use_atomic:	True if the write_atomic() callback is to be used
 *
 * Return:	True if this context still owns the console. False if
 *		ownership was handed over or taken.
 *
 * When this function returns false then the calling context no longer owns
 * the console and is no longer allowed to go forward. In this case it must
 * back out immediately and carefully. The buffer content is also no longer
 * trusted since it no longer belongs to the calling context. If the caller
 * wants to do more it must reacquire the console first.
 *
 * When true is returned, @wctxt->ctxt.backlog indicates whether there are
 * still records pending in the ringbuffer.
 */
static bool nbcon_emit_next_record(struct nbcon_write_context *wctxt, bool use_atomic)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
	struct printk_message pmsg = {
		.pbufs = ctxt->pbufs,
	};
	unsigned long con_dropped;
	struct nbcon_state cur;
	unsigned long dropped;
	unsigned long ulseq;

	/*
	 * This function should never be called for consoles that have not
	 * implemented the necessary callback for writing: i.e. legacy
	 * consoles and, when atomic, nbcon consoles with no write_atomic().
	 * Handle it as if ownership was lost and try to continue.
	 *
	 * Note that for nbcon consoles the write_thread() callback is
	 * mandatory and was already checked in nbcon_alloc().
	 */
	if (WARN_ON_ONCE((use_atomic && !con->write_atomic) ||
			 !(console_srcu_read_flags(con) & CON_NBCON))) {
		nbcon_context_release(ctxt);
		return false;
	}

	/*
	 * The printk buffers are filled within an unsafe section. This
	 * prevents NBCON_PRIO_NORMAL and NBCON_PRIO_EMERGENCY from
	 * clobbering each other.
	 */
	if (!nbcon_context_enter_unsafe(ctxt))
		return false;

	ctxt->backlog = printk_get_next_message(&pmsg, ctxt->seq, is_extended, true);
	if (!ctxt->backlog)
		return nbcon_context_exit_unsafe(ctxt);

	/*
	 * @con->dropped is not protected in case of an unsafe hostile
	 * takeover. In that situation the update can be racy so
	 * annotate it accordingly.
	 */
	con_dropped = data_race(READ_ONCE(con->dropped));

	dropped = con_dropped + pmsg.dropped;
	if (dropped && !is_extended)
		console_prepend_dropped(&pmsg, dropped);

	/*
	 * If the previous owner was assigned the same record, this context
	 * has taken over ownership and is replaying the record. Prepend a
	 * message to let the user know the record is replayed.
	 */
	ulseq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_prev_seq));
	if (__ulseq_to_u64seq(prb, ulseq) == pmsg.seq) {
		console_prepend_replay(&pmsg);
	} else {
		/*
		 * Ensure this context is still the owner before trying to
		 * update @nbcon_prev_seq. Otherwise the value in @ulseq may
		 * not be from the previous owner and instead be some later
		 * value from the context that took over ownership.
		 */
		nbcon_state_read(con, &cur);
		if (!nbcon_context_can_proceed(ctxt, &cur))
			return false;

		atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_prev_seq), &ulseq,
					__u64seq_to_ulseq(pmsg.seq));
	}

	if (!nbcon_context_exit_unsafe(ctxt))
		return false;

	/* For skipped records just update seq/dropped in @con. */
	if (pmsg.outbuf_len == 0)
		goto update_con;

	/* Initialize the write context for driver callbacks. */
	nbcon_write_context_set_buf(wctxt, &pmsg.pbufs->outbuf[0], pmsg.outbuf_len);

	if (use_atomic)
		con->write_atomic(con, wctxt);
	else
		con->write_thread(con, wctxt);

	if (!wctxt->outbuf) {
		/*
		 * Ownership was lost and reacquired by the driver. Handle it
		 * as if ownership was lost.
		 */
		nbcon_context_release(ctxt);
		return false;
	}

	/*
	 * Ownership may have been lost but _not_ reacquired by the driver.
	 * This case is detected and handled when entering unsafe to update
	 * dropped/seq values.
	 */

	/*
	 * Since any dropped message was successfully output, reset the
	 * dropped count for the console.
	 */
	dropped = 0;
update_con:
	/*
	 * The dropped count and the sequence number are updated within an
	 * unsafe section. This limits update races to the panic context and
	 * allows the panic context to win.
	 */
	if (!nbcon_context_enter_unsafe(ctxt))
		return false;

	if (dropped != con_dropped) {
		/* Counterpart to the READ_ONCE() above. */
		WRITE_ONCE(con->dropped, dropped);
	}

	nbcon_seq_try_update(ctxt, pmsg.seq + 1);

	return nbcon_context_exit_unsafe(ctxt);
}
/*
 * nbcon_emit_one - Print one record for an nbcon console using the
 *			specified callback
 * @wctxt:	An initialized write context struct to use for this context
 * @use_atomic:	True if the write_atomic() callback is to be used
 *
 * Return:	True, when a record has been printed and there are still
 *		pending records. The caller might want to continue flushing.
 *
 *		False, when there is no pending record, or when the console
 *		context cannot be acquired, or the ownership has been lost.
 *		The caller should give up. Either the job is done, cannot be
 *		done, or will be handled by the owning context.
 *
 * This is an internal helper to handle the locking of the console before
 * calling nbcon_emit_next_record().
 */
static bool nbcon_emit_one(struct nbcon_write_context *wctxt, bool use_atomic)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);
	struct console *con = ctxt->console;
	unsigned long flags;
	bool ret = false;

	/* @flags is only initialized and used when the device_lock is taken. */
	if (!use_atomic) {
		con->device_lock(con, &flags);

		/*
		 * Ensure this stays on the CPU to make handover and
		 * takeover possible.
		 */
		cant_migrate();
	}

	if (!nbcon_context_try_acquire(ctxt))
		goto out;

	/*
	 * nbcon_emit_next_record() returns false when the console was
	 * handed over or taken over. In both cases the context is no
	 * longer valid.
	 *
	 * The higher priority printing context takes over responsibility
	 * to print the pending records.
	 */
	if (!nbcon_emit_next_record(wctxt, use_atomic))
		goto out;

	nbcon_context_release(ctxt);

	ret = ctxt->backlog;
out:
	if (!use_atomic)
		con->device_unlock(con, flags);

	return ret;
}
  1003. /**
  1004. * nbcon_kthread_should_wakeup - Check whether a printer thread should wakeup
  1005. * @con: Console to operate on
  1006. * @ctxt: The nbcon context from nbcon_context_try_acquire()
  1007. *
  1008. * Return: True if the thread should shutdown or if the console is
  1009. * allowed to print and a record is available. False otherwise.
  1010. *
  1011. * After the thread wakes up, it must first check if it should shutdown before
  1012. * attempting any printing.
  1013. */
  1014. static bool nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt)
  1015. {
  1016. bool ret = false;
  1017. short flags;
  1018. int cookie;
  1019. if (kthread_should_stop())
  1020. return true;
  1021. cookie = console_srcu_read_lock();
  1022. flags = console_srcu_read_flags(con);
  1023. if (console_is_usable(con, flags, false)) {
  1024. /* Bring the sequence in @ctxt up to date */
  1025. ctxt->seq = nbcon_seq_read(con);
  1026. ret = prb_read_valid(prb, ctxt->seq, NULL);
  1027. }
  1028. console_srcu_read_unlock(cookie);
  1029. return ret;
  1030. }
/**
 * nbcon_kthread_func - The printer thread function
 * @__console:	Console to operate on
 *
 * Return:	0
 */
static int nbcon_kthread_func(void *__console)
{
	struct console *con = __console;
	struct nbcon_write_context wctxt = {
		.ctxt.console	= con,
		.ctxt.prio	= NBCON_PRIO_NORMAL,
	};
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
	short con_flags;
	bool backlog;
	int cookie;

wait_for_event:
	/*
	 * Guarantee this task is visible on the rcuwait before
	 * checking the wake condition.
	 *
	 * The full memory barrier within set_current_state() of
	 * ___rcuwait_wait_event() pairs with the full memory
	 * barrier within rcuwait_has_sleeper().
	 *
	 * This pairs with rcuwait_has_sleeper:A and nbcon_kthread_wake:A.
	 */
	rcuwait_wait_event(&con->rcuwait,
			   nbcon_kthread_should_wakeup(con, ctxt),
			   TASK_INTERRUPTIBLE); /* LMM(nbcon_kthread_func:A) */

	/* Emit records until the backlog is cleared or ownership is lost. */
	do {
		if (kthread_should_stop())
			return 0;

		backlog = false;

		/*
		 * Keep the srcu read lock around the entire operation so that
		 * synchronize_srcu() can guarantee that the kthread stopped
		 * or suspended printing.
		 */
		cookie = console_srcu_read_lock();

		con_flags = console_srcu_read_flags(con);

		if (console_is_usable(con, con_flags, false))
			backlog = nbcon_emit_one(&wctxt, false);

		console_srcu_read_unlock(cookie);

		cond_resched();

	} while (backlog);

	goto wait_for_event;
}
  1080. /**
  1081. * nbcon_irq_work - irq work to wake console printer thread
  1082. * @irq_work: The irq work to operate on
  1083. */
  1084. static void nbcon_irq_work(struct irq_work *irq_work)
  1085. {
  1086. struct console *con = container_of(irq_work, struct console, irq_work);
  1087. nbcon_kthread_wake(con);
  1088. }
/*
 * rcuwait_has_sleeper - Check whether a task is sleeping on the rcuwait
 * @w:	The rcuwait to check
 *
 * Return: True if a waiter is registered on @w, false otherwise.
 */
static inline bool rcuwait_has_sleeper(struct rcuwait *w)
{
	/*
	 * Guarantee any new records can be seen by tasks preparing to wait
	 * before this context checks if the rcuwait is empty.
	 *
	 * This full memory barrier pairs with the full memory barrier within
	 * set_current_state() of ___rcuwait_wait_event(), which is called
	 * after prepare_to_rcuwait() adds the waiter but before it has
	 * checked the wait condition.
	 *
	 * This pairs with nbcon_kthread_func:A.
	 */
	smp_mb(); /* LMM(rcuwait_has_sleeper:A) */
	return rcuwait_active(w);
}
  1105. /**
  1106. * nbcon_kthreads_wake - Wake up printing threads using irq_work
  1107. */
  1108. void nbcon_kthreads_wake(void)
  1109. {
  1110. struct console *con;
  1111. int cookie;
  1112. if (!printk_kthreads_running)
  1113. return;
  1114. cookie = console_srcu_read_lock();
  1115. for_each_console_srcu(con) {
  1116. if (!(console_srcu_read_flags(con) & CON_NBCON))
  1117. continue;
  1118. /*
  1119. * Only schedule irq_work if the printing thread is
  1120. * actively waiting. If not waiting, the thread will
  1121. * notice by itself that it has work to do.
  1122. */
  1123. if (rcuwait_has_sleeper(&con->rcuwait))
  1124. irq_work_queue(&con->irq_work);
  1125. }
  1126. console_srcu_read_unlock(cookie);
  1127. }
  1128. /*
  1129. * nbcon_kthread_stop - Stop a console printer thread
  1130. * @con: Console to operate on
  1131. */
  1132. void nbcon_kthread_stop(struct console *con)
  1133. {
  1134. lockdep_assert_console_list_lock_held();
  1135. if (!con->kthread)
  1136. return;
  1137. kthread_stop(con->kthread);
  1138. con->kthread = NULL;
  1139. }
  1140. /**
  1141. * nbcon_kthread_create - Create a console printer thread
  1142. * @con: Console to operate on
  1143. *
  1144. * Return: True if the kthread was started or already exists.
  1145. * Otherwise false and @con must not be registered.
  1146. *
  1147. * This function is called when it will be expected that nbcon consoles are
  1148. * flushed using the kthread. The messages printed with NBCON_PRIO_NORMAL
  1149. * will be no longer flushed by the legacy loop. This is why failure must
  1150. * be fatal for console registration.
  1151. *
  1152. * If @con was already registered and this function fails, @con must be
  1153. * unregistered before the global state variable @printk_kthreads_running
  1154. * can be set.
  1155. */
  1156. bool nbcon_kthread_create(struct console *con)
  1157. {
  1158. struct task_struct *kt;
  1159. lockdep_assert_console_list_lock_held();
  1160. if (con->kthread)
  1161. return true;
  1162. kt = kthread_run(nbcon_kthread_func, con, "pr/%s%d", con->name, con->index);
  1163. if (WARN_ON(IS_ERR(kt))) {
  1164. con_printk(KERN_ERR, con, "failed to start printing thread\n");
  1165. return false;
  1166. }
  1167. con->kthread = kt;
  1168. /*
  1169. * It is important that console printing threads are scheduled
  1170. * shortly after a printk call and with generous runtime budgets.
  1171. */
  1172. sched_set_normal(con->kthread, -20);
  1173. return true;
  1174. }
/* Track the nbcon emergency nesting per CPU. */
static DEFINE_PER_CPU(unsigned int, nbcon_pcpu_emergency_nesting);

/* Emergency nesting counter used before per-CPU data is ready (early boot). */
static unsigned int early_nbcon_pcpu_emergency_nesting __initdata;
  1178. /**
  1179. * nbcon_get_cpu_emergency_nesting - Get the per CPU emergency nesting pointer
  1180. *
  1181. * Context: For reading, any context. For writing, any context which could
  1182. * not be migrated to another CPU.
  1183. * Return: Either a pointer to the per CPU emergency nesting counter of
  1184. * the current CPU or to the init data during early boot.
  1185. *
  1186. * The function is safe for reading per-CPU variables in any context because
  1187. * preemption is disabled if the current CPU is in the emergency state. See
  1188. * also nbcon_cpu_emergency_enter().
  1189. */
  1190. static __ref unsigned int *nbcon_get_cpu_emergency_nesting(void)
  1191. {
  1192. /*
  1193. * The value of __printk_percpu_data_ready gets set in normal
  1194. * context and before SMP initialization. As a result it could
  1195. * never change while inside an nbcon emergency section.
  1196. */
  1197. if (!printk_percpu_data_ready())
  1198. return &early_nbcon_pcpu_emergency_nesting;
  1199. return raw_cpu_ptr(&nbcon_pcpu_emergency_nesting);
  1200. }
  1201. /**
  1202. * nbcon_get_default_prio - The appropriate nbcon priority to use for nbcon
  1203. * printing on the current CPU
  1204. *
  1205. * Context: Any context.
  1206. * Return: The nbcon_prio to use for acquiring an nbcon console in this
  1207. * context for printing.
  1208. *
  1209. * The function is safe for reading per-CPU data in any context because
  1210. * preemption is disabled if the current CPU is in the emergency or panic
  1211. * state.
  1212. */
  1213. enum nbcon_prio nbcon_get_default_prio(void)
  1214. {
  1215. unsigned int *cpu_emergency_nesting;
  1216. if (this_cpu_in_panic())
  1217. return NBCON_PRIO_PANIC;
  1218. cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
  1219. if (*cpu_emergency_nesting)
  1220. return NBCON_PRIO_EMERGENCY;
  1221. return NBCON_PRIO_NORMAL;
  1222. }
/**
 * nbcon_legacy_emit_next_record - Print one record for an nbcon console
 *					in legacy contexts
 * @con:	The console to print on
 * @handover:	Will be set to true if a printk waiter has taken over the
 *		console_lock, in which case the caller is no longer holding
 *		both the console_lock and the SRCU read lock. Otherwise it
 *		is set to false.
 * @cookie:	The cookie from the SRCU read lock.
 * @use_atomic:	Set true when called in an atomic or unknown context.
 *		It affects which nbcon callback will be used: write_atomic()
 *		or write_thread().
 *
 *		When false, the write_thread() callback is used and would be
 *		called in a preemptible context unless disabled by the
 *		device_lock. The legacy handover is not allowed in this mode.
 *
 * Context:	Any context except NMI.
 * Return:	True, when a record has been printed and there are still
 *		pending records. The caller might want to continue flushing.
 *
 *		False, when there is no pending record, or when the console
 *		context cannot be acquired, or the ownership has been lost.
 *		The caller should give up. Either the job is done, cannot be
 *		done, or will be handled by the owning context.
 *
 * This function is meant to be called by console_flush_all() to print records
 * on nbcon consoles from legacy context (printing via console unlocking).
 * Essentially it is the nbcon version of console_emit_next_record().
 */
bool nbcon_legacy_emit_next_record(struct console *con, bool *handover,
				   int cookie, bool use_atomic)
{
	struct nbcon_write_context wctxt = { };
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
	unsigned long flags;
	bool progress;

	ctxt->console = con;
	ctxt->prio = nbcon_get_default_prio();

	if (use_atomic) {
		/*
		 * In an atomic or unknown context, use the same procedure as
		 * in console_emit_next_record(). It allows to handover.
		 */
		printk_safe_enter_irqsave(flags);
		console_lock_spinning_enable();
		stop_critical_timings();
	}

	progress = nbcon_emit_one(&wctxt, use_atomic);

	if (use_atomic) {
		start_critical_timings();
		*handover = console_lock_spinning_disable_and_check(cookie);
		printk_safe_exit_irqrestore(flags);
	} else {
		/* Non-atomic does not perform legacy spinning handovers. */
		*handover = false;
	}

	return progress;
}
/**
 * __nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
 *					write_atomic() callback
 * @con:			The nbcon console to flush
 * @stop_seq:			Flush up until this record
 * @allow_unsafe_takeover:	True, to allow unsafe hostile takeovers
 *
 * Return:	0 if @con was flushed up to @stop_seq. Otherwise, error code on
 *		failure.
 *
 * Errors:
 *
 *	-EPERM:		Unable to acquire console ownership.
 *
 *	-EAGAIN:	Another context took over ownership while printing.
 *
 *	-ENOENT:	A record before @stop_seq is not available.
 *
 * If flushing up to @stop_seq was not successful, it only makes sense for the
 * caller to try again when -EAGAIN was returned. When -EPERM is returned,
 * this context is not allowed to acquire the console. When -ENOENT is
 * returned, it cannot be expected that the unfinalized record will become
 * available.
 */
static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
					    bool allow_unsafe_takeover)
{
	struct nbcon_write_context wctxt = { };
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);
	int err = 0;

	ctxt->console = con;
	ctxt->spinwait_max_us = 2000;
	ctxt->prio = nbcon_get_default_prio();
	ctxt->allow_unsafe_takeover = allow_unsafe_takeover;

	if (!nbcon_context_try_acquire(ctxt))
		return -EPERM;

	while (nbcon_seq_read(con) < stop_seq) {
		/*
		 * nbcon_emit_next_record() returns false when the console was
		 * handed over or taken over. In both cases the context is no
		 * longer valid.
		 */
		if (!nbcon_emit_next_record(&wctxt, true))
			return -EAGAIN;

		if (!ctxt->backlog) {
			/* Are there reserved but not yet finalized records? */
			if (nbcon_seq_read(con) < stop_seq)
				err = -ENOENT;
			break;
		}
	}

	nbcon_context_release(ctxt);
	return err;
}
/**
 * nbcon_atomic_flush_pending_con - Flush specified nbcon console using its
 *					write_atomic() callback
 * @con:			The nbcon console to flush
 * @stop_seq:			Flush up until this record
 * @allow_unsafe_takeover:	True, to allow unsafe hostile takeovers
 *
 * This will stop flushing before @stop_seq if another context has ownership.
 * That context is then responsible for the flushing. Likewise, if new records
 * are added while this context was flushing and there is no other context
 * to handle the printing, this context must also flush those records.
 */
static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq,
					   bool allow_unsafe_takeover)
{
	struct console_flush_type ft;
	unsigned long flags;
	int err;

again:
	/*
	 * Atomic flushing does not use console driver synchronization (i.e.
	 * it does not hold the port lock for uart consoles). Therefore IRQs
	 * must be disabled to avoid being interrupted and then calling into
	 * a driver that will deadlock trying to acquire console ownership.
	 */
	local_irq_save(flags);

	err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);

	local_irq_restore(flags);

	/*
	 * If there was a new owner (-EPERM, -EAGAIN), that context is
	 * responsible for completing.
	 *
	 * Do not wait for records not yet finalized (-ENOENT) to avoid a
	 * possible deadlock. They will either get flushed by the writer or
	 * eventually skipped on panic CPU.
	 */
	if (err)
		return;

	/*
	 * If flushing was successful but more records are available, this
	 * context must flush those remaining records if the printer thread
	 * is not available to do it.
	 */
	printk_get_console_flush_type(&ft);
	if (!ft.nbcon_offload &&
	    prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
		stop_seq = prb_next_reserve_seq(prb);
		goto again;
	}
}
  1386. /**
  1387. * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their
  1388. * write_atomic() callback
  1389. * @stop_seq: Flush up until this record
  1390. * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers
  1391. */
  1392. static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover)
  1393. {
  1394. struct console *con;
  1395. int cookie;
  1396. cookie = console_srcu_read_lock();
  1397. for_each_console_srcu(con) {
  1398. short flags = console_srcu_read_flags(con);
  1399. if (!(flags & CON_NBCON))
  1400. continue;
  1401. if (!console_is_usable(con, flags, true))
  1402. continue;
  1403. if (nbcon_seq_read(con) >= stop_seq)
  1404. continue;
  1405. nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover);
  1406. }
  1407. console_srcu_read_unlock(cookie);
  1408. }
  1409. /**
  1410. * nbcon_atomic_flush_pending - Flush all nbcon consoles using their
  1411. * write_atomic() callback
  1412. *
  1413. * Flush the backlog up through the currently newest record. Any new
  1414. * records added while flushing will not be flushed if there is another
  1415. * context available to handle the flushing. This is to avoid one CPU
  1416. * printing unbounded because other CPUs continue to add records.
  1417. */
  1418. void nbcon_atomic_flush_pending(void)
  1419. {
  1420. __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false);
  1421. }
  1422. /**
  1423. * nbcon_atomic_flush_unsafe - Flush all nbcon consoles using their
  1424. * write_atomic() callback and allowing unsafe hostile takeovers
  1425. *
  1426. * Flush the backlog up through the currently newest record. Unsafe hostile
  1427. * takeovers will be performed, if necessary.
  1428. */
  1429. void nbcon_atomic_flush_unsafe(void)
  1430. {
  1431. __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true);
  1432. }
  1433. /**
  1434. * nbcon_cpu_emergency_enter - Enter an emergency section where printk()
  1435. * messages for that CPU are flushed directly
  1436. *
  1437. * Context: Any context. Disables preemption.
  1438. *
  1439. * When within an emergency section, printk() calls will attempt to flush any
  1440. * pending messages in the ringbuffer.
  1441. */
  1442. void nbcon_cpu_emergency_enter(void)
  1443. {
  1444. unsigned int *cpu_emergency_nesting;
  1445. preempt_disable();
  1446. cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
  1447. (*cpu_emergency_nesting)++;
  1448. }
  1449. /**
  1450. * nbcon_cpu_emergency_exit - Exit an emergency section
  1451. *
  1452. * Context: Within an emergency section. Enables preemption.
  1453. */
  1454. void nbcon_cpu_emergency_exit(void)
  1455. {
  1456. unsigned int *cpu_emergency_nesting;
  1457. cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting();
  1458. if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0))
  1459. (*cpu_emergency_nesting)--;
  1460. preempt_enable();
  1461. }
  1462. /**
  1463. * nbcon_alloc - Allocate and init the nbcon console specific data
  1464. * @con: Console to initialize
  1465. *
  1466. * Return: True if the console was fully allocated and initialized.
  1467. * Otherwise @con must not be registered.
  1468. *
  1469. * When allocation and init was successful, the console must be properly
  1470. * freed using nbcon_free() once it is no longer needed.
  1471. */
  1472. bool nbcon_alloc(struct console *con)
  1473. {
  1474. struct nbcon_state state = { };
  1475. /* The write_thread() callback is mandatory. */
  1476. if (WARN_ON(!con->write_thread))
  1477. return false;
  1478. rcuwait_init(&con->rcuwait);
  1479. init_irq_work(&con->irq_work, nbcon_irq_work);
  1480. atomic_long_set(&ACCESS_PRIVATE(con, nbcon_prev_seq), -1UL);
  1481. nbcon_state_set(con, &state);
  1482. /*
  1483. * Initialize @nbcon_seq to the highest possible sequence number so
  1484. * that practically speaking it will have nothing to print until a
  1485. * desired initial sequence number has been set via nbcon_seq_force().
  1486. */
  1487. atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), ULSEQ_MAX(prb));
  1488. if (con->flags & CON_BOOT) {
  1489. /*
  1490. * Boot console printing is synchronized with legacy console
  1491. * printing, so boot consoles can share the same global printk
  1492. * buffers.
  1493. */
  1494. con->pbufs = &printk_shared_pbufs;
  1495. } else {
  1496. con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL);
  1497. if (!con->pbufs) {
  1498. con_printk(KERN_ERR, con, "failed to allocate printing buffer\n");
  1499. return false;
  1500. }
  1501. if (printk_kthreads_running) {
  1502. if (!nbcon_kthread_create(con)) {
  1503. kfree(con->pbufs);
  1504. con->pbufs = NULL;
  1505. return false;
  1506. }
  1507. }
  1508. }
  1509. return true;
  1510. }
  1511. /**
  1512. * nbcon_free - Free and cleanup the nbcon console specific data
  1513. * @con: Console to free/cleanup nbcon data
  1514. */
  1515. void nbcon_free(struct console *con)
  1516. {
  1517. struct nbcon_state state = { };
  1518. if (printk_kthreads_running)
  1519. nbcon_kthread_stop(con);
  1520. nbcon_state_set(con, &state);
  1521. /* Boot consoles share global printk buffers. */
  1522. if (!(con->flags & CON_BOOT))
  1523. kfree(con->pbufs);
  1524. con->pbufs = NULL;
  1525. }
  1526. /**
  1527. * nbcon_device_try_acquire - Try to acquire nbcon console and enter unsafe
  1528. * section
  1529. * @con: The nbcon console to acquire
  1530. *
  1531. * Context: Under the locking mechanism implemented in
  1532. * @con->device_lock() including disabling migration.
  1533. * Return: True if the console was acquired. False otherwise.
  1534. *
  1535. * Console drivers will usually use their own internal synchronization
  1536. * mechasism to synchronize between console printing and non-printing
  1537. * activities (such as setting baud rates). However, nbcon console drivers
  1538. * supporting atomic consoles may also want to mark unsafe sections when
  1539. * performing non-printing activities in order to synchronize against their
  1540. * atomic_write() callback.
  1541. *
  1542. * This function acquires the nbcon console using priority NBCON_PRIO_NORMAL
  1543. * and marks it unsafe for handover/takeover.
  1544. */
  1545. bool nbcon_device_try_acquire(struct console *con)
  1546. {
  1547. struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
  1548. cant_migrate();
  1549. memset(ctxt, 0, sizeof(*ctxt));
  1550. ctxt->console = con;
  1551. ctxt->prio = NBCON_PRIO_NORMAL;
  1552. if (!nbcon_context_try_acquire(ctxt))
  1553. return false;
  1554. if (!nbcon_context_enter_unsafe(ctxt))
  1555. return false;
  1556. return true;
  1557. }
  1558. EXPORT_SYMBOL_GPL(nbcon_device_try_acquire);
/**
 * nbcon_device_release - Exit unsafe section and release the nbcon console
 * @con:	The nbcon console acquired in nbcon_device_try_acquire()
 */
void nbcon_device_release(struct console *con)
{
	struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);
	struct console_flush_type ft;
	int cookie;

	/*
	 * NOTE(review): a failed unsafe-section exit presumably means
	 * ownership was already lost to a higher-priority context, so there
	 * is nothing left to release — confirm against
	 * nbcon_context_exit_unsafe().
	 */
	if (!nbcon_context_exit_unsafe(ctxt))
		return;

	nbcon_context_release(ctxt);

	/*
	 * This context must flush any new records added while the console
	 * was locked if the printer thread is not available to do it. The
	 * console_srcu_read_lock must be taken to ensure the console is
	 * usable throughout flushing.
	 */
	cookie = console_srcu_read_lock();
	printk_get_console_flush_type(&ft);
	if (console_is_usable(con, console_srcu_read_flags(con), true) &&
	    !ft.nbcon_offload &&
	    prb_read_valid(prb, nbcon_seq_read(con), NULL)) {
		/*
		 * If nbcon_atomic flushing is not available, fallback to
		 * using the legacy loop.
		 */
		if (ft.nbcon_atomic) {
			__nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false);
		} else if (ft.legacy_direct) {
			if (console_trylock())
				console_unlock();
		} else if (ft.legacy_offload) {
			printk_trigger_flush();
		}
	}
	console_srcu_read_unlock(cookie);
}
EXPORT_SYMBOL_GPL(nbcon_device_release);