sclp.c
// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s):	Martin Peschke <mpeschke@de.ibm.com>
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
#include <asm/debug.h>

#include "sclp.h"

#define SCLP_HEADER "sclp: "
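
/* Abbreviated trace entry: four-character event ID plus two numeric parameters. */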
struct sclp_trace_entry {
	char id[4] __nonstring;
	u32 a;
	u64 b;
};

#define SCLP_TRACE_ENTRY_SIZE sizeof(struct sclp_trace_entry)
#define SCLP_TRACE_MAX_SIZE 128
#define SCLP_TRACE_EVENT_MAX_SIZE 64

/* Debug trace area intended for all entries in abbreviated form. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
			 &debug_hex_ascii_view);

/* Error trace area intended for full entries relating to failed requests. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
			 SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);

/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;

/* Flag to indicate if buffer pages are dropped on buffer full condition */
bool sclp_console_drop = true;

/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;
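
/* Add an abbreviated entry to the sclp trace area. Entries flagged as
 * errors are duplicated to the error trace area. */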
static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
	struct sclp_trace_entry e;

	memset(&e, 0, sizeof(e));
	strtomem(e.id, id);
	e.a = a;
	e.b = b;
	debug_event(&sclp_debug, prio, &e, sizeof(e));
	if (err)
		debug_event(&sclp_debug_err, 0, &e, sizeof(e));
}

static inline int no_zeroes_len(void *data, int len)
{
	char *d = data;

	/* Minimize trace area usage by not tracing trailing zeroes. */
	while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
		len--;

	return len;
}

static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
{
	debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
	if (errlen)
		debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
}

static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
{
	struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
	int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;

	/* Full SCCB tracing if debug level is set to max. */
	if (sclp_debug.level == DEBUG_MAX_LEVEL)
		return len;

	/* Minimal tracing for console writes. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
	    (evbuf->type == EVTYP_MSG || evbuf->type == EVTYP_VT220MSG))
		limit = SCLP_TRACE_ENTRY_SIZE;

	return min(len, limit);
}

static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
				   sclp_cmdw_t cmd, struct sccb_header *sccb,
				   bool err)
{
	sclp_trace(prio, id, a, b, err);
	if (sccb) {
		sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
			       err ? sccb->length : 0);
	}
}

static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
				    struct evbuf_header *evbuf, bool err)
{
	sclp_trace(prio, id, a, b, err);
	sclp_trace_bin(prio + 1, evbuf,
		       min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
		       err ? evbuf->length : 0);
}
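
/* Trace a request by packing its status, response code, queue timeout and
 * start count into the 64-bit trace parameter. */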
static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
				  bool err)
{
	struct sccb_header *sccb = req->sccb;
	union {
		struct {
			u16 status;
			u16 response;
			u16 timeout;
			u16 start_count;
		};
		u64 b;
	} summary;

	summary.status = req->status;
	summary.response = sccb ? sccb->response_code : 0;
	summary.timeout = (u16)req->queue_timeout;
	summary.start_count = (u16)req->start_count;

	sclp_trace(prio, id, __pa(sccb), summary.b, err);
}

static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
				       struct sclp_register *reg)
{
	struct {
		u64 receive;
		u64 send;
	} d;

	d.receive = reg->receive_mask;
	d.send = reg->send_mask;

	sclp_trace(prio, id, a, b, false);
	sclp_trace_bin(prio, &d, sizeof(d), 0);
}
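
/* Handlers for the "sclp_con_pages=" and "sclp_con_drop=" kernel command
 * line parameters. */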
static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	return kstrtobool(str, &sclp_console_drop) == 0;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY 3
#define SCLP_MASK_RETRY 3

/* Timeout intervals in seconds.*/
#define SCLP_BUSY_INTERVAL 10
#define SCLP_RETRY_INTERVAL 30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
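
/* Queue a read event data request unless one is already pending.
 * Called while sclp_lock is locked. */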
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	/* TMO: A timeout occurred (a=force_restart) */
	sclp_trace(2, "TMO", force_restart, 0, true);

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();

		if (req) {
			/* RQTM: Request timed out (a=sccb, b=summary) */
			sclp_trace_req(2, "RQTM", req, true);
		}

		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}
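
/* Issue a service call and emit trace entries before and after the call. */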
static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
{
	static u64 srvc_count;
	int rc;

	/* SRV1: Service call about to be issued (a=command, b=sccb address) */
	sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);

	rc = sclp_service_call(command, sccb);

	/* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
	sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);

	if (rc == 0)
		active_cmd = command;

	return rc;
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call_trace(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);

		/* RQAB: Request aborted (a=sccb, b=summary) */
		sclp_trace_req(2, "RQAB", req, true);

		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_init_req)
		return 1;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}

	/* RQAD: Request was added (a=sccb, b=caller) */
	sclp_trace(2, "RQAD", __pa(req->sccb), _RET_IP_, false);

	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
EXPORT_SYMBOL(sclp_add_request);

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}

		/* EVNT: Event callback (b=receiver) */
		sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
				 evbuf, !reg);

		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
					     sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == __pa(req->sccb))
			return req;
	}
	return NULL;
}
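
/* Check whether the SCCB response code and, for outgoing events, the
 * event-processed flag indicate success. Used to classify trace entries. */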
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
	struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
	struct evbuf_header *evbuf;
	u16 response;

	if (!sccb)
		return true;

	/* Check SCCB response. */
	response = sccb->response_code & 0xff;
	if (response != 0x10 && response != 0x20)
		return false;

	/* Check event-processed flag on outgoing events. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
		evbuf = (struct evbuf_header *)(sccb + 1);
		if (!(evbuf->flags & 0x80))
			return false;
	}

	return true;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;

	/* INT: Interrupt received (a=intparm, b=cmd) */
	sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
			(struct sccb_header *)__va(finished_sccb),
			!ok_response(finished_sccb, active_cmd));

	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;

			/* RQOK: Request success (a=sccb, b=summary) */
			sclp_trace_req(2, "RQOK", req, false);

			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		} else {
			/* UNEX: Unexpected SCCB completion (a=sccb address) */
			sclp_trace(0, "UNEX", finished_sccb, 0, true);
		}
		sclp_running_state = sclp_running_state_idle;
		active_cmd = 0;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	struct ctlreg cr0, cr0_sync;
	unsigned long flags;
	static u64 sync_count;
	u64 timeout;
	int irq_context;

	/* SYN1: Synchronous wait start (a=runstate, b=sync count) */
	sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	local_ctl_store(0, &cr0);
	cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync.val |= 1UL << (63 - 54);
	local_ctl_load(0, &cr0_sync);
	arch_local_irq_enable_external();
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	local_ctl_load(0, &cr0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);

	/* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
	sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn) {
			/* STCG: State-change callback (b=callback) */
			sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
				   false);

			reg->state_change_fn(reg);
		}
	} while (reg);
}
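
/* Layout of a state-change event buffer as delivered by the SCLP. */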
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8	validity_sclp_active_facility_mask : 1;
	u8	validity_sclp_receive_mask : 1;
	u8	validity_sclp_send_mask : 1;
	u8	validity_read_data_function_mask : 1;
	u16	_zeros : 12;
	u16	mask_length;
	u64	sclp_active_facility_mask;
	u8	masks[2 * 1021 + 4];	/* variable length */
	/*
	 * u8 sclp_receive_mask[mask_length];
	 * u8 sclp_send_mask[mask_length];
	 * u32 read_data_function_mask;
	 */
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}
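
/* Built-in listener for state-change events. */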
static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	/* REG: Event listener registered (b=caller) */
	sclp_trace_register(2, "REG", 0, _RET_IP_, reg);

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}
EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	/* UREG: Event listener unregistered (b=caller) */
	sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}
EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}
EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb = sclp_init_sccb;

	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != __pa(sclp_init_sccb))
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call_trace(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		rc = -EBUSY;
		if (sclp_init_req.status == SCLP_REQ_DONE) {
			if (sccb->header.response_code == 0x20) {
				rc = 0;
				break;
			} else if (sccb->header.response_code == 0x74f0) {
				if (!sclp_mask_compat_mode) {
					sclp_mask_compat_mode = true;
					retry = 0;
				}
			}
		}
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event,
	.priority      = INT_MIN,
};
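
/* Driver attributes (con_pages, con_drop, con_full) exported via sysfs. */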
static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sysfs_emit(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_store(struct device_driver *dev, const char *buf, size_t count)
{
	int rc;

	rc = kstrtobool(buf, &sclp_console_drop);
	return rc ?: count;
}

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sysfs_emit(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RW(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sysfs_emit(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};

static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};

static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name = "sclp",
		.groups = sclp_drv_attr_groups,
	},
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
int sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
	/* Set up variables */
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	timer_setup(&sclp_request_timer, NULL, 0);
	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	list_del(&sclp_state_change_event.list);
	sclp_init_state = sclp_init_state_uninitialized;
	free_page((unsigned long) sclp_read_sccb);
	free_page((unsigned long) sclp_init_sccb);
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
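
/* Register the platform driver that provides the sysfs attributes above. */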
static __init int sclp_initcall(void)
{
	return platform_driver_register(&sclp_pdrv);
}

arch_initcall(sclp_initcall);