
// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static const struct bus_type css_bus_type;
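/*
 * Call fn for each possible subchannel id, iterating over all subchannel
 * sets up to max_ssid. The return value of the last call to fn is handed
 * back to the caller.
 */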
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
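/*
 * Helpers for for_each_subchannel_staged(): cb_data carries the callbacks
 * and an idset of subchannel ids that have not yet been visited. Registered
 * subchannels are handled first and removed from the set; the remaining ids
 * are then passed to the "unknown" callback.
 */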
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}
	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static void css_sch_create_locks(struct subchannel *sch)
{
	spin_lock_init(&sch->lock);
	mutex_init(&sch->reg_mutex);
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch);
}

static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}
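/*
 * Allocate and minimally initialize a subchannel structure for the given
 * schid/schib, including its locks, todo work and DMA masks. Returns an
 * ERR_PTR on failure.
 */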
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	css_sch_create_locks(sch);
	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	sch->dev.dma_mask = &sch->dma_mask;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses for some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}
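/*
 * Refresh the subchannel's ssd info: query it via CHSC and fall back to the
 * data in the PMCW if that fails, then make sure all referenced channel
 * paths are registered.
 */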
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sysfs_emit(buf, "%01x\n", sch->st);
}
static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sysfs_emit(buf, "css:t%01X\n", sch->st);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = driver_set_override(dev, &sch->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sysfs_emit(buf, "%02x %02x %02x\n",
			  pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(&sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(&sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(&sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(&sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}
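/*
 * The slow path: slow_subchannel_set collects the ids of subchannels that
 * still need to be evaluated. css_slow_path_func() drains the set on the
 * cio workqueue; css_eval_wq is woken once the set is empty so that waiters
 * such as css_complete_work() can make progress.
 */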
static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	atomic_set(&css_eval_scheduled, 0);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take long time for platforms with lots of
		 * known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while. */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static int __unset_validpath(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	/* Here we want to make sure that we consider only those subchannels
	 * which do not have an operational device attached. This can be
	 * determined from the PAM and POM values of the pmcw. The OPM tells
	 * us about paths which are currently varied off and therefore must
	 * not be considered.
	 */
	if (sch->st == SUBCHANNEL_TYPE_IO &&
	    (sch->opm & pmcw->pam & pmcw->pom))
		idset_sch_del(set, sch->schid);

	return 0;
}
static int __unset_online(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
		idset_sch_del(set, sch->schid);

	return 0;
}
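/*
 * Schedule an evaluation of all subchannels that match @cond: start from a
 * completely filled idset, let the helpers above remove every subchannel
 * that does not qualify, and merge what remains into slow_subchannel_set.
 */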
void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
	unsigned long flags;
	struct idset *set;

	/* Find unregistered subchannels. */
	set = idset_sch_new();
	if (!set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(set);
	switch (cond) {
	case CSS_EVAL_NO_PATH:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
		break;
	case CSS_EVAL_NOT_ONLINE:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
		break;
	default:
		break;
	}

	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all subchannels with no valid operational path. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sysfs_emit(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
			    const char *buf, size_t count)
{
	CIO_TRACE_EVENT(4, "usr-rescan");

	css_schedule_eval_all();
	css_complete_work();

	return count;
}
static DEVICE_ATTR_WO(rescan);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sysfs_emit(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	&dev_attr_rescan.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};
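/*
 * Allocate and register one channel subsystem instance, including its
 * "defunct" pseudo subchannel device.
 */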
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);

	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}

	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	css_sch_create_locks(css->pseudo_subchannel);

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}
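/*
 * Create a gen_pool backed by coherent DMA memory: pre-populate it with
 * nr_pages pages allocated for dma_dev. The pool uses a minimum allocation
 * order of 3, i.e. allocations are rounded up to multiples of 8 bytes.
 */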
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}

static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}
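/*
 * Allocate zeroed DMA memory from a gen_pool. If the pool cannot satisfy the
 * request, grow it by another coherently allocated chunk (rounded up to page
 * size) and retry. The DMA address is returned via dma_handle when the
 * caller asks for it.
 */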
void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			  size_t size, dma32_t *dma_handle)
{
	dma_addr_t dma_addr;
	size_t chunk_size;
	void *addr;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_dma_alloc(gp_dma, size, &dma_addr);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = dma_alloc_coherent(dma_dev, chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, (unsigned long)addr, dma_addr, chunk_size, -1);
		addr = gen_pool_dma_alloc(gp_dma, size, dma_handle ? &dma_addr : NULL);
	}
	if (dma_handle)
		*dma_handle = (__force dma32_t)dma_addr;
	return addr;
}

void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	return __cio_gp_dma_zalloc(gp_dma, dma_dev, size, NULL);
}

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}
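/*
 * Wait until all outstanding subchannel evaluation has finished and every
 * registered css driver has settled. Returns -EINTR if the wait is
 * interrupted by a signal.
 */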
int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open	= nonseekable_open,
	.proc_write	= cio_settle_write,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
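/*
 * A css driver matches a subchannel if the subchannel's type appears in the
 * driver's subchannel_type id table. A driver_override set via sysfs forces
 * binding to the named driver only.
 */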
static int css_bus_match(struct device *dev, const struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	const struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static void css_remove(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->remove)
		sch->driver->remove(sch);
	sch->driver = NULL;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static const struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);