sclp.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Core functions to access the SCLP interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER        "sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

  36. /* Data for read and and init requests. */
  37. static struct sclp_req sclp_read_req;
  38. static struct sclp_req sclp_init_req;
  39. static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
  40. static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
  41. /* Suspend request */
  42. static DECLARE_COMPLETION(sclp_request_queue_flushed);
  43. /* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
  44. int sclp_console_pages = SCLP_CONSOLE_PAGES;
  45. /* Flag to indicate if buffer pages are dropped on buffer full condition */
  46. int sclp_console_drop = 1;
  47. /* Number of times the console dropped buffer pages */
  48. unsigned long sclp_console_full;
  49. static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
  50. {
  51. complete(&sclp_request_queue_flushed);
  52. }
  53. static int __init sclp_setup_console_pages(char *str)
  54. {
  55. int pages, rc;
  56. rc = kstrtoint(str, 0, &pages);
  57. if (!rc && pages >= SCLP_CONSOLE_PAGES)
  58. sclp_console_pages = pages;
  59. return 1;
  60. }
  61. __setup("sclp_con_pages=", sclp_setup_console_pages);
  62. static int __init sclp_setup_console_drop(char *str)
  63. {
  64. int drop, rc;
  65. rc = kstrtoint(str, 0, &drop);
  66. if (!rc)
  67. sclp_console_drop = drop;
  68. return 1;
  69. }
  70. __setup("sclp_con_drop=", sclp_setup_console_drop);
  71. static struct sclp_req sclp_suspend_req;
  72. /* Timer for request retries. */
  73. static struct timer_list sclp_request_timer;
  74. /* Timer for queued requests. */
  75. static struct timer_list sclp_queue_timer;
  76. /* Internal state: is a request active at the sclp? */
  77. static volatile enum sclp_running_state_t {
  78. sclp_running_state_idle,
  79. sclp_running_state_running,
  80. sclp_running_state_reset_pending
  81. } sclp_running_state = sclp_running_state_idle;
  82. /* Internal state: is a read request pending? */
  83. static volatile enum sclp_reading_state_t {
  84. sclp_reading_state_idle,
  85. sclp_reading_state_reading
  86. } sclp_reading_state = sclp_reading_state_idle;
  87. /* Internal state: is the driver currently serving requests? */
  88. static volatile enum sclp_activation_state_t {
  89. sclp_activation_state_active,
  90. sclp_activation_state_deactivating,
  91. sclp_activation_state_inactive,
  92. sclp_activation_state_activating
  93. } sclp_activation_state = sclp_activation_state_active;
  94. /* Internal state: is an init mask request pending? */
  95. static volatile enum sclp_mask_state_t {
  96. sclp_mask_state_idle,
  97. sclp_mask_state_initializing
  98. } sclp_mask_state = sclp_mask_state_idle;
  99. /* Internal state: is the driver suspended? */
  100. static enum sclp_suspend_state_t {
  101. sclp_suspend_state_running,
  102. sclp_suspend_state_suspended,
  103. } sclp_suspend_state = sclp_suspend_state_running;
  104. /* Maximum retry counts */
  105. #define SCLP_INIT_RETRY 3
  106. #define SCLP_MASK_RETRY 3
  107. /* Timeout intervals in seconds.*/
  108. #define SCLP_BUSY_INTERVAL 10
  109. #define SCLP_RETRY_INTERVAL 30
  110. static void sclp_request_timeout(bool force_restart);
  111. static void sclp_process_queue(void);
  112. static void __sclp_make_read_req(void);
  113. static int sclp_init_mask(int calculate);
  114. static int sclp_init(void);
static void
__sclp_queue_read_req(void)
{
        if (sclp_reading_state == sclp_reading_state_idle) {
                sclp_reading_state = sclp_reading_state_reading;
                __sclp_make_read_req();
                /* Add request to head of queue */
                list_add(&sclp_read_req.list, &sclp_req_queue);
        }
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
        del_timer(&sclp_request_timer);
        sclp_request_timer.function = cb;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
        sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
        sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (force_restart) {
                if (sclp_running_state == sclp_running_state_running) {
                        /* Break running state and queue NOP read event request
                         * to get a defined interface state. */
                        __sclp_queue_read_req();
                        sclp_running_state = sclp_running_state_idle;
                }
        } else {
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout_normal);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
        unsigned long expires_next = 0;
        struct sclp_req *req;

        list_for_each_entry(req, &sclp_req_queue, list) {
                if (!req->queue_expires)
                        continue;
                if (!expires_next ||
                    (time_before(req->queue_expires, expires_next)))
                        expires_next = req->queue_expires;
        }
        return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
        unsigned long flags, now;
        struct sclp_req *req;

        spin_lock_irqsave(&sclp_lock, flags);
        now = jiffies;
        /* Don't need list_for_each_safe because we break out after list_del */
        list_for_each_entry(req, &sclp_req_queue, list) {
                if (!req->queue_expires)
                        continue;
                if (time_before_eq(req->queue_expires, now)) {
                        if (req->status == SCLP_REQ_QUEUED) {
                                req->status = SCLP_REQ_QUEUED_TIMEOUT;
                                list_del(&req->list);
                                goto out;
                        }
                }
        }
        req = NULL;
out:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
        unsigned long flags, expires_next;
        struct sclp_req *req;

        do {
                req = __sclp_req_queue_remove_expired_req();
                if (req && req->callback)
                        req->callback(req, req->callback_data);
        } while (req);

        spin_lock_irqsave(&sclp_lock, flags);
        expires_next = __sclp_req_queue_find_next_timeout();
        if (expires_next)
                mod_timer(&sclp_queue_timer, expires_next);
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
        int rc;

        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
        rc = sclp_service_call(req->command, req->sccb);
        req->start_count++;

        if (rc == 0) {
                /* Successfully started request */
                req->status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_request_timeout_restart);
                return 0;
        } else if (rc == -EBUSY) {
                /* Try again later */
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout_normal);
                return 0;
        }
        /* Request failed */
        req->status = SCLP_REQ_FAILED;
        return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state != sclp_running_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
        }
        del_timer(&sclp_request_timer);
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
                if (!req->sccb)
                        goto do_post;
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
                /* Request failed */
                if (req->start_count > 1) {
                        /* Cannot abort already submitted request - could still
                         * be active at the SCLP */
                        __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                                 sclp_request_timeout_normal);
                        break;
                }
do_post:
                /* Post-processing for aborted request */
                list_del(&req->list);
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
                        spin_lock_irqsave(&sclp_lock, flags);
                }
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
        if (req == &sclp_suspend_req || req == &sclp_init_req)
                return 1;
        if (sclp_suspend_state != sclp_suspend_state_running)
                return 0;
        if (sclp_init_state != sclp_init_state_initialized)
                return 0;
        if (sclp_activation_state != sclp_activation_state_active)
                return 0;
        return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        if (!__sclp_can_add_request(req)) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
        rc = 0;
        if (req->queue_timeout) {
                req->queue_expires = jiffies + req->queue_timeout * HZ;
                if (!timer_pending(&sclp_queue_timer) ||
                    time_after(sclp_queue_timer.expires, req->queue_expires))
                        mod_timer(&sclp_queue_timer, req->queue_expires);
        } else
                req->queue_expires = 0;
        /* Start if request is first in list */
        if (sclp_running_state == sclp_running_state_idle &&
            req->list.prev == &sclp_req_queue) {
                if (!req->sccb) {
                        list_del(&req->list);
                        rc = -ENODATA;
                        goto out;
                }
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
        }
out:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
EXPORT_SYMBOL(sclp_add_request);
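
/*
 * Illustrative sketch, not part of the driver: a typical caller embeds a
 * struct sclp_req, points ->sccb at a suitably aligned SCCB and waits for
 * the completion callback, much like sclp_freeze() does below. The command
 * word MY_CMDW, the buffer my_sccb and the helper my_send() are
 * hypothetical placeholders.
 *
 *        static DECLARE_COMPLETION(my_done);
 *
 *        static void my_cb(struct sclp_req *req, void *data)
 *        {
 *                complete(data);
 *        }
 *
 *        static int my_send(void)
 *        {
 *                static struct sclp_req req;
 *
 *                memset(&req, 0, sizeof(req));
 *                req.command = MY_CMDW;
 *                req.sccb = my_sccb;
 *                req.status = SCLP_REQ_FILLED;
 *                req.callback = my_cb;
 *                req.callback_data = &my_done;
 *                if (sclp_add_request(&req))
 *                        return -EIO;
 *                wait_for_completion(&my_done);
 *                return 0;
 *        }
 */
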
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *reg;
        int offset;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        rc = 0;
        for (offset = sizeof(struct sccb_header); offset < sccb->length;
             offset += evbuf->length) {
                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
                /* Check for malformed hardware response */
                if (evbuf->length == 0)
                        break;
                /* Search for event handler */
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
                                break;
                        else
                                reg = NULL;
                }
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
                        spin_lock_irqsave(&sclp_lock, flags);
                } else if (reg == NULL)
                        rc = -EOPNOTSUPP;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
        unsigned long flags;
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) req->sccb;
        if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
            sccb->response_code == 0x220))
                sclp_dispatch_evbufs(sccb);
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_reading_state = sclp_reading_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        memset(&sclp_read_req, 0, sizeof(struct sclp_req));
        sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
        sclp_read_req.status = SCLP_REQ_QUEUED;
        sclp_read_req.start_count = 0;
        sclp_read_req.callback = sclp_read_cb;
        sclp_read_req.sccb = sccb;
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;
        sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
        struct list_head *l;
        struct sclp_req *req;

        list_for_each(l, &sclp_req_queue) {
                req = list_entry(l, struct sclp_req, list);
                if (sccb == (u32) (addr_t) req->sccb)
                        return req;
        }
        return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
                                   unsigned int param32, unsigned long param64)
{
        struct sclp_req *req;
        u32 finished_sccb;
        u32 evbuf_pending;

        inc_irq_stat(IRQEXT_SCP);
        spin_lock(&sclp_lock);
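        /* Editorial note on the masks below: the interruption parameter
         * carries the address of the SCCB that finished in its upper bits
         * (SCCBs are doubleword-aligned, so the low three bits are free)
         * and flags pending event buffers in its low-order bits. */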
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
                req = __sclp_find_req(finished_sccb);
                if (req) {
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
                }
                sclp_running_state = sclp_running_state_idle;
        }
        if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
                __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
        sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
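        /* Editorial note: TOD clock bit 51 ticks once per microsecond, so
         * the 64-bit clock value grows by 10^6 * 2^12 = 4.096e9, roughly
         * 2^32, per second. Shifting whole seconds left by 32 therefore
         * yields a slightly generous interval, fine for these timeouts. */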
        return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
        unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        u64 timeout;
        int irq_context;

        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
                timeout = get_tod_clock_fast() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
        local_irq_save(flags);
        /* Prevent bottom half from executing once we force interrupts open */
        irq_context = in_interrupt();
        if (!irq_context)
                local_bh_disable();
        /* Enable service-signal interruption, disable timer interrupts */
        old_tick = local_tick_disable();
        trace_hardirqs_on();
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
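        /* Editorial note: in z/Architecture MSB-first bit numbering,
         * 1UL << (63 - 54) sets CR0 bit 54, the service-signal
         * external-interruption subclass mask. */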
        cr0_sync |= 1UL << (63 - 54);
        __ctl_load(cr0_sync, 0, 0);
        __arch_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
                    get_tod_clock_fast() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(&sclp_request_timer);
                cpu_relax();
        }
        local_irq_disable();
        __ctl_load(cr0, 0, 0);
        if (!irq_context)
                _local_bh_enable();
        local_tick_enable(old_tick);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
        struct list_head *l;
        struct sclp_register *reg;
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;

        do {
                spin_lock_irqsave(&sclp_lock, flags);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        receive_mask = reg->send_mask & sclp_receive_mask;
                        send_mask = reg->receive_mask & sclp_send_mask;
                        if (reg->sclp_receive_mask != receive_mask ||
                            reg->sclp_send_mask != send_mask) {
                                reg->sclp_receive_mask = receive_mask;
                                reg->sclp_send_mask = send_mask;
                                break;
                        } else
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg && reg->state_change_fn)
                        reg->state_change_fn(reg);
        } while (reg);
}

struct sclp_statechangebuf {
        struct evbuf_header header;
        u8      validity_sclp_active_facility_mask : 1;
        u8      validity_sclp_receive_mask : 1;
        u8      validity_sclp_send_mask : 1;
        u8      validity_read_data_function_mask : 1;
        u16     _zeros : 12;
        u16     mask_length;
        u64     sclp_active_facility_mask;
        u8      masks[2 * 1021 + 4];    /* variable length */
        /*
         * u8 sclp_receive_mask[mask_length];
         * u8 sclp_send_mask[mask_length];
         * u32 read_data_function_mask;
         */
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

        scbuf = (struct sclp_statechangebuf *) evbuf;
        spin_lock_irqsave(&sclp_lock, flags);
        if (scbuf->validity_sclp_receive_mask)
                sclp_receive_mask = sccb_get_recv_mask(scbuf);
        if (scbuf->validity_sclp_send_mask)
                sclp_send_mask = sccb_get_send_mask(scbuf);
        spin_unlock_irqrestore(&sclp_lock, flags);
        if (scbuf->validity_sclp_active_facility_mask)
                sclp.facilities = scbuf->sclp_active_facility_mask;
        sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
        struct list_head *l;
        struct sclp_register *t;

        *receive_mask = 0;
        *send_mask = 0;
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                *receive_mask |= t->receive_mask;
                *send_mask |= t->send_mask;
        }
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int rc;

        rc = sclp_init();
        if (rc)
                return rc;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check event mask for collisions */
        __sclp_get_mask(&receive_mask, &send_mask);
        if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        /* Trigger initial state change callback */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        reg->pm_event_posted = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        if (rc) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_del(&reg->list);
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        return rc;
}
EXPORT_SYMBOL(sclp_register);
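
/*
 * Illustrative sketch, not part of the driver: an event driver registers a
 * listener much like sclp_state_change_event above. The event-type mask
 * EVTYP_MY_MASK and both handler functions are hypothetical placeholders;
 * sclp_register() returns -EBUSY if the masks collide with an already
 * registered listener.
 *
 *        static struct sclp_register my_event = {
 *                .receive_mask = EVTYP_MY_MASK,
 *                .receiver_fn = my_receiver_fn,
 *                .state_change_fn = my_state_change_fn,
 *        };
 *
 *        rc = sclp_register(&my_event);
 */
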
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask(1);
}
EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int unprocessed;
        u16 remaining;

        evbuf = (struct evbuf_header *) (sccb + 1);
        unprocessed = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
                if (evbuf->flags & 0x80) {
                        sccb->length -= evbuf->length;
                        memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        unprocessed++;
                        evbuf = (struct evbuf_header *)
                                        ((addr_t) evbuf + evbuf->length);
                }
        }
        return unprocessed;
}
EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
        struct init_sccb *sccb;

        sccb = (struct init_sccb *) sclp_init_sccb;
        clear_page(sccb);
        memset(&sclp_init_req, 0, sizeof(struct sclp_req));
        sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
        sclp_init_req.status = SCLP_REQ_FILLED;
        sclp_init_req.start_count = 0;
        sclp_init_req.callback = NULL;
        sclp_init_req.callback_data = NULL;
        sclp_init_req.sccb = sccb;
        sccb->header.length = sizeof(*sccb);
        if (sclp_mask_compat_mode)
                sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
        else
                sccb->mask_length = sizeof(sccb_mask_t);
        sccb_set_recv_mask(sccb, receive_mask);
        sccb_set_send_mask(sccb, send_mask);
        sccb_set_sclp_recv_mask(sccb, 0);
        sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
        unsigned long flags;
        struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int retry;
        int rc;
        unsigned long wait;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check if interface is in appropriate state */
        if (sclp_mask_state != sclp_mask_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        if (sclp_activation_state == sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_mask_state = sclp_mask_state_initializing;
        /* Determine mask */
        if (calculate)
                __sclp_get_mask(&receive_mask, &send_mask);
        else {
                receive_mask = 0;
                send_mask = 0;
        }
        rc = -EIO;
        for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
                /* Prepare request */
                __sclp_make_init_req(receive_mask, send_mask);
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (sclp_add_request(&sclp_init_req)) {
                        /* Try again later */
                        wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
                        while (time_before(jiffies, wait))
                                sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                        continue;
                }
                while (sclp_init_req.status != SCLP_REQ_DONE &&
                       sclp_init_req.status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        /* Successful request */
                        if (calculate) {
                                sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
                                sclp_send_mask = sccb_get_sclp_send_mask(sccb);
                        } else {
                                sclp_receive_mask = 0;
                                sclp_send_mask = 0;
                        }
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        sclp_dispatch_state_change();
                        spin_lock_irqsave(&sclp_lock, flags);
                        rc = 0;
                        break;
                }
        }
        sclp_mask_state = sclp_mask_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected and
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Deactivate can only be called when active */
        if (sclp_activation_state != sclp_activation_state_active) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_deactivating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(0);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_inactive;
        else
                sclp_activation_state = sclp_activation_state_active;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Reactivate can only be called when inactive */
        if (sclp_activation_state != sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_activating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_active;
        else
                sclp_activation_state = sclp_activation_state_inactive;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
                               unsigned int param32, unsigned long param64)
{
        u32 finished_sccb;

        inc_irq_stat(IRQEXT_SCP);
        finished_sccb = param32 & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
                return;
        if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
                panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
                      finished_sccb);
        spin_lock(&sclp_lock);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_DONE;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_FAILED;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
        struct init_sccb *sccb;
        unsigned long flags;
        int retry;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Prepare init mask command */
        rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
        if (rc) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
                rc = sclp_service_call(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_check_timeout);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* Enable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
                /* Wait for signal from interrupt or timeout */
                sclp_sync_wait();
                /* Disable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
                spin_lock_irqsave(&sclp_lock, flags);
                del_timer(&sclp_request_timer);
                rc = -EBUSY;
                if (sclp_init_req.status == SCLP_REQ_DONE) {
                        if (sccb->header.response_code == 0x20) {
                                rc = 0;
                                break;
                        } else if (sccb->header.response_code == 0x74f0) {
                                if (!sclp_mask_compat_mode) {
                                        sclp_mask_compat_mode = true;
                                        retry = 0;
                                }
                        }
                }
        }
        unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        sclp_deactivate();
        return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
        .notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */
static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
        struct sclp_register *reg;
        unsigned long flags;

        if (!rollback) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list)
                        reg->pm_event_posted = 0;
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        do {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list) {
                        if (rollback && reg->pm_event_posted)
                                goto found;
                        if (!rollback && !reg->pm_event_posted)
                                goto found;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
found:
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg->pm_event_fn)
                        reg->pm_event_fn(reg, sclp_pm_event);
                reg->pm_event_posted = rollback ? 0 : 1;
        } while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */
static int sclp_freeze(struct device *dev)
{
        unsigned long flags;
        int rc;

        sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_suspended;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Initialize suspend data */
        memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
        sclp_suspend_req.callback = sclp_suspend_req_cb;
        sclp_suspend_req.status = SCLP_REQ_FILLED;
        init_completion(&sclp_request_queue_flushed);

        rc = sclp_add_request(&sclp_suspend_req);
        if (rc == 0)
                wait_for_completion(&sclp_request_queue_flushed);
        else if (rc != -ENODATA)
                goto fail_thaw;

        rc = sclp_deactivate();
        if (rc)
                goto fail_thaw;
        return 0;

fail_thaw:
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_running;
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
        return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
        unsigned long flags;
        int rc;

        rc = sclp_reactivate();
        if (rc)
                return rc;

        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_running;
        spin_unlock_irqrestore(&sclp_lock, flags);

        sclp_pm_event(event, 0);
        return 0;
}

static int sclp_thaw(struct device *dev)
{
        return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
        return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
        .freeze  = sclp_freeze,
        .thaw    = sclp_thaw,
        .restore = sclp_restore,
};

static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
        &driver_attr_con_pages.attr,
        &driver_attr_con_drop.attr,
        &driver_attr_con_full.attr,
        NULL,
};

static struct attribute_group sclp_drv_attr_group = {
        .attrs = sclp_drv_attrs,
};

static const struct attribute_group *sclp_drv_attr_groups[] = {
        &sclp_drv_attr_group,
        NULL,
};

static struct platform_driver sclp_pdrv = {
        .driver = {
                .name   = "sclp",
                .pm     = &sclp_pm_ops,
                .groups = sclp_drv_attr_groups,
        },
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check for previous or running initialization */
        if (sclp_init_state != sclp_init_state_uninitialized)
                goto fail_unlock;
        sclp_init_state = sclp_init_state_initializing;
        /* Set up variables */
        INIT_LIST_HEAD(&sclp_req_queue);
        INIT_LIST_HEAD(&sclp_reg_list);
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        timer_setup(&sclp_request_timer, NULL, 0);
        timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
        /* Check interface */
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_check_interface();
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register reboot handler */
        rc = register_reboot_notifier(&sclp_reboot_notifier);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register interrupt handler */
        rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
        if (rc)
                goto fail_unregister_reboot_notifier;
        sclp_init_state = sclp_init_state_initialized;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
         * IRQs enabled. */
        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
        sclp_init_mask(1);
        return 0;

fail_unregister_reboot_notifier:
        unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
        sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
                             unsigned long event, void *data)
{
        if (sclp_suspend_state == sclp_suspend_state_suspended)
                sclp_undo_suspend(SCLP_PM_EVENT_THAW);
        return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
        .notifier_call = sclp_panic_notify,
        .priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
        int rc;

        rc = platform_driver_register(&sclp_pdrv);
        if (rc)
                return rc;

        sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
        rc = PTR_ERR_OR_ZERO(sclp_pdev);
        if (rc)
                goto fail_platform_driver_unregister;

        rc = atomic_notifier_chain_register(&panic_notifier_list,
                                            &sclp_on_panic_nb);
        if (rc)
                goto fail_platform_device_unregister;

        return sclp_init();

fail_platform_device_unregister:
        platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
        platform_driver_unregister(&sclp_pdrv);
        return rc;
}

arch_initcall(sclp_initcall);