dasd_alias.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * PAV alias management for the DASD ECKD discipline
  4. *
  5. * Copyright IBM Corp. 2007
  6. * Author(s): Stefan Weinhuber <wein@de.ibm.com>
  7. */
  8. #include <linux/list.h>
  9. #include <linux/slab.h>
  10. #include <asm/ebcdic.h>
  11. #include "dasd_int.h"
  12. #include "dasd_eckd.h"
  13. /*
  14. * General concept of alias management:
  15. * - PAV and DASD alias management is specific to the eckd discipline.
  16. * - A device is connected to an lcu as long as the device exists.
  17. * dasd_alias_make_device_known_to_lcu will be called when the
  18. * device is checked by the eckd discipline and
  19. * dasd_alias_disconnect_device_from_lcu will be called
  20. * before the device is deleted.
  21. * - The dasd_alias_add_device / dasd_alias_remove_device
  22. * functions mark the point when a device is 'ready for service'.
  23. * - A summary unit check is a rare occasion, but it is mandatory to
  24. * support it. It requires some complex recovery actions before the
  25. * devices can be used again (see dasd_alias_handle_summary_unit_check).
  26. * - dasd_alias_get_start_dev will find an alias device that can be used
  27. * instead of the base device and does some (very simple) load balancing.
  28. * This is the function that gets called for each I/O, so when improving
  29. * something, this function should get faster or better, the rest has just
  30. * to be correct.
  31. */
/* Worker functions and the update scheduler, defined further down. */
static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

/*
 * Root of the alias management tree: a list of known storage servers,
 * protected by its own spinlock. Servers hold lcus, lcus hold devices
 * and pav groups.
 */
static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
  39. static struct alias_server *_find_server(struct dasd_uid *uid)
  40. {
  41. struct alias_server *pos;
  42. list_for_each_entry(pos, &aliastree.serverlist, server) {
  43. if (!strncmp(pos->uid.vendor, uid->vendor,
  44. sizeof(uid->vendor))
  45. && !strncmp(pos->uid.serial, uid->serial,
  46. sizeof(uid->serial)))
  47. return pos;
  48. }
  49. return NULL;
  50. }
  51. static struct alias_lcu *_find_lcu(struct alias_server *server,
  52. struct dasd_uid *uid)
  53. {
  54. struct alias_lcu *pos;
  55. list_for_each_entry(pos, &server->lculist, lcu) {
  56. if (pos->uid.ssid == uid->ssid)
  57. return pos;
  58. }
  59. return NULL;
  60. }
  61. static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
  62. struct dasd_uid *uid)
  63. {
  64. struct alias_pav_group *pos;
  65. __u8 search_unit_addr;
  66. /* for hyper pav there is only one group */
  67. if (lcu->pav == HYPER_PAV) {
  68. if (list_empty(&lcu->grouplist))
  69. return NULL;
  70. else
  71. return list_first_entry(&lcu->grouplist,
  72. struct alias_pav_group, group);
  73. }
  74. /* for base pav we have to find the group that matches the base */
  75. if (uid->type == UA_BASE_DEVICE)
  76. search_unit_addr = uid->real_unit_addr;
  77. else
  78. search_unit_addr = uid->base_unit_addr;
  79. list_for_each_entry(pos, &lcu->grouplist, group) {
  80. if (pos->uid.base_unit_addr == search_unit_addr &&
  81. !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
  82. return pos;
  83. }
  84. return NULL;
  85. }
  86. static struct alias_server *_allocate_server(struct dasd_uid *uid)
  87. {
  88. struct alias_server *server;
  89. server = kzalloc(sizeof(*server), GFP_KERNEL);
  90. if (!server)
  91. return ERR_PTR(-ENOMEM);
  92. memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
  93. memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
  94. INIT_LIST_HEAD(&server->server);
  95. INIT_LIST_HEAD(&server->lculist);
  96. return server;
  97. }
/* Release an alias_server; caller must have unlinked it already. */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
/*
 * Allocate and initialize an alias_lcu for the given uid.
 * The unit address configuration buffer (uac) and the reset summary
 * unit check request (rsu_cqr) are preallocated with GFP_DMA here so
 * the summary unit check handler never needs to allocate memory.
 * Returns ERR_PTR(-ENOMEM) if any allocation fails.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	/* 16 bytes of RSCK parameter data; byte 0 carries the reason code */
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	/* a brand-new lcu always needs its uac data read first */
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

/* unwind in reverse allocation order */
out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
/*
 * Release an lcu and its preallocated rsu_cqr and uac buffers.
 * Caller must have unlinked the lcu from its server already.
 */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * The device is placed on the lcu's inactive list and private->lcu is
 * set; it becomes usable only after dasd_alias_add_device.
 * Returns 0 on success (whether or not server/lcu existed before) or a
 * negative error code (e.g. -ENOMEM).
 *
 * Allocation cannot happen under the spinlock, so the lock is dropped
 * around _allocate_server/_allocate_lcu and the lookup is repeated
 * afterwards; if another thread created the entry in the meantime, the
 * freshly allocated one is discarded.
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		/* drop the lock for the GFP_KERNEL allocation */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}
	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		/* same drop-allocate-recheck dance for the lcu */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}
/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 * Also tears down the lcu and server when this was their last device.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		/*
		 * cancel_work_sync may sleep, so the lock must be dropped;
		 * re-check afterwards because the worker may have finished
		 * and cleared suc_data.device itself in the meantime.
		 */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device) {
			dasd_put_device(device);
			lcu->suc_data.device = NULL;
		}
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		/* same pattern for the delayed uac update worker */
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device) {
			dasd_put_device(device);
			lcu->ruac_data.device = NULL;
		}
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* tree manipulation needs both locks, aliastree.lock first */
	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	list_del_init(&device->alias_list);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		/* last device gone - dispose of the whole lcu */
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		/* the cancelled update must be rescheduled on another device */
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
  272. /*
  273. * This function assumes that the unit address configuration stored
  274. * in the lcu is up to date and will update the device uid before
  275. * adding it to a pav group.
  276. */
  277. static int _add_device_to_lcu(struct alias_lcu *lcu,
  278. struct dasd_device *device,
  279. struct dasd_device *pos)
  280. {
  281. struct dasd_eckd_private *private = device->private;
  282. struct alias_pav_group *group;
  283. struct dasd_uid uid;
  284. spin_lock(get_ccwdev_lock(device->cdev));
  285. private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
  286. private->uid.base_unit_addr =
  287. lcu->uac->unit[private->uid.real_unit_addr].base_ua;
  288. uid = private->uid;
  289. spin_unlock(get_ccwdev_lock(device->cdev));
  290. /* if we have no PAV anyway, we don't need to bother with PAV groups */
  291. if (lcu->pav == NO_PAV) {
  292. list_move(&device->alias_list, &lcu->active_devices);
  293. return 0;
  294. }
  295. group = _find_group(lcu, &uid);
  296. if (!group) {
  297. group = kzalloc(sizeof(*group), GFP_ATOMIC);
  298. if (!group)
  299. return -ENOMEM;
  300. memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
  301. memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
  302. group->uid.ssid = uid.ssid;
  303. if (uid.type == UA_BASE_DEVICE)
  304. group->uid.base_unit_addr = uid.real_unit_addr;
  305. else
  306. group->uid.base_unit_addr = uid.base_unit_addr;
  307. memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
  308. INIT_LIST_HEAD(&group->group);
  309. INIT_LIST_HEAD(&group->baselist);
  310. INIT_LIST_HEAD(&group->aliaslist);
  311. list_add(&group->group, &lcu->grouplist);
  312. }
  313. if (uid.type == UA_BASE_DEVICE)
  314. list_move(&device->alias_list, &group->baselist);
  315. else
  316. list_move(&device->alias_list, &group->aliaslist);
  317. private->pavgroup = group;
  318. return 0;
  319. };
  320. static void _remove_device_from_lcu(struct alias_lcu *lcu,
  321. struct dasd_device *device)
  322. {
  323. struct dasd_eckd_private *private = device->private;
  324. struct alias_pav_group *group;
  325. list_move(&device->alias_list, &lcu->inactive_devices);
  326. group = private->pavgroup;
  327. if (!group)
  328. return;
  329. private->pavgroup = NULL;
  330. if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
  331. list_del(&group->group);
  332. kfree(group);
  333. return;
  334. }
  335. if (group->next == device)
  336. group->next = NULL;
  337. };
  338. static int
  339. suborder_not_supported(struct dasd_ccw_req *cqr)
  340. {
  341. char *sense;
  342. char reason;
  343. char msg_format;
  344. char msg_no;
  345. /*
  346. * intrc values ENODEV, ENOLINK and EPERM
  347. * will be optained from sleep_on to indicate that no
  348. * IO operation can be started
  349. */
  350. if (cqr->intrc == -ENODEV)
  351. return 1;
  352. if (cqr->intrc == -ENOLINK)
  353. return 1;
  354. if (cqr->intrc == -EPERM)
  355. return 1;
  356. sense = dasd_get_sense(&cqr->irb);
  357. if (!sense)
  358. return 0;
  359. reason = sense[0];
  360. msg_format = (sense[7] & 0xF0);
  361. msg_no = (sense[7] & 0x0F);
  362. /* command reject, Format 0 MSG 4 - invalid parameter */
  363. if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
  364. return 1;
  365. return 0;
  366. }
/*
 * Read the unit address configuration of the lcu into lcu->uac by
 * issuing a PSF (Perform Subsystem Function) order with a chained
 * RSSD (Read Subsystem Data), suborder 0x0e, on @device.
 * Returns 0 on success, -EOPNOTSUPP when the suborder is not
 * supported or the device is unusable, or another negative error code
 * for failures that left NEED_UAC_UPDATE set for a later retry.
 */
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device, NULL);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	/* first CCW: the PSF order, command-chained to the RSSD */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - feature codes */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = virt_to_dma32(lcu->uac);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		goto out;

	if (suborder_not_supported(cqr)) {
		/* suborder not supported or device unusable for IO */
		rc = -EOPNOTSUPP;
	} else {
		/* IO failed but should be retried */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
out:
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Re-read the unit address configuration via @refdev and rebuild the
 * pav groups of @lcu from scratch: all devices are first moved to the
 * active list and the old groups are freed, then - after the uac data
 * was read without the lock held - the lcu's pav mode is rederived and
 * each device is sorted back into a group by _add_device_to_lcu.
 * Returns 0 on success or the error from read_unit_address_configuration.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	/* dissolve all existing groups; devices go back to the active list */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* performs real I/O, must run unlocked */
	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * there is another update needed skip the remaining handling
	 * the data might already be outdated
	 * but especially do not add the device to an LCU with pending
	 * update
	 */
	if (lcu->flags & NEED_UAC_UPDATE)
		goto out;

	/* derive the lcu's pav mode from the first alias entry found */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	/* re-group all devices; _add_device_to_lcu errors are not fatal here */
	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
out:
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
/*
 * Delayed-work handler that performs the actual lcu update.
 * Retries every 30 seconds as long as the update fails (other than
 * -EOPNOTSUPP) or a new update was requested in the meantime;
 * otherwise releases the reference device and clears UPDATE_PENDING.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device arrival while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			      " alias data in lcu (rc = %d), retry later", rc);
		/* keep the device reference unless scheduling failed */
		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
			dasd_put_device(device);
	} else {
		dasd_put_device(device);
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
/*
 * Mark the lcu as needing a uac update and schedule lcu_update_work,
 * picking a device to issue the I/O on: prefer @device if it is still
 * connected, then any device of the first pav group, then any active
 * device. The chosen device is reference-counted until the worker
 * releases it.
 * Returns 0 on success (or if an update is already scheduled/running),
 * -EINVAL when no usable device could be found.
 * Caller must hold lcu->lock.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	dasd_get_device(usedev);
	lcu->ruac_data.device = usedev;
	/* drop the reference again if the work was already queued */
	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
		dasd_put_device(usedev);
	return 0;
}
/*
 * Mark a device as 'ready for service': sort it into its pav group
 * based on the cached uac data, or - when the uac data is (or becomes)
 * stale - park it on the active list and schedule an lcu update.
 * Returns 0 on success or the error from _add_device_to_lcu.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	__u8 uaddr = private->uid.real_unit_addr;
	struct alias_lcu *lcu = private->lcu;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * Check if device and lcu type differ. If so, the uac data may be
	 * outdated and needs to be updated.
	 */
	if (private->uid.type !=  lcu->uac->unit[uaddr].ua_type) {
		lcu->flags |= UPDATE_PENDING;
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "uid type mismatch - trigger rescan");
	}
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		/* keep the device usable on the active list meanwhile */
		list_move(&device->alias_list, &lcu->active_devices);
		private->pavgroup = NULL;
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}
/*
 * Like dasd_alias_add_device, but force a fresh uac read first by
 * marking the lcu's data as outdated before taking the normal add path.
 */
int dasd_alias_update_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	private->lcu->flags |= UPDATE_PENDING;
	return dasd_alias_add_device(device);
}
  588. int dasd_alias_remove_device(struct dasd_device *device)
  589. {
  590. struct dasd_eckd_private *private = device->private;
  591. struct alias_lcu *lcu = private->lcu;
  592. unsigned long flags;
  593. /* nothing to do if already removed */
  594. if (!lcu)
  595. return 0;
  596. spin_lock_irqsave(&lcu->lock, flags);
  597. _remove_device_from_lcu(lcu, device);
  598. spin_unlock_irqrestore(&lcu->lock, flags);
  599. return 0;
  600. }
/*
 * Pick an alias device to start I/O on instead of @base_device,
 * doing simple round-robin load balancing within the pav group
 * (group->next remembers where to continue next time).
 * Returns NULL whenever the base device itself should be used:
 * no lcu/pav, pending uac update, missing prefix support, no group,
 * no aliases, or the candidate alias is busier/stopped/offline.
 * This runs on every I/O start, so it is kept deliberately cheap.
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_eckd_private *alias_priv, *private = base_device->private;
	struct alias_lcu *lcu = private->lcu;
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	unsigned long flags;

	if (!lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV enabled but prefix not, very unlikely
		 * seems to be a lost pathgroup
		 * use base device to do IO
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}
	spin_lock_irqsave(&lcu->lock, flags);
	group = private->pavgroup;
	if (!group) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		return NULL;
	}
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the round-robin cursor, wrapping at the end of the list */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = alias_device->private;
	/* only switch to the alias if it carries less load than the base */
	if ((alias_priv->count < private->count) && !alias_device->stopped &&
	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
		return alias_device;
	else
		return NULL;
}
/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather then in dasd_eckd.c
 */

/*
 * Issue a Reset Summary Unit Check (RSCK) CCW on @device, passing the
 * reason code from the original summary unit check as parameter.
 * Uses the cqr preallocated in _allocate_lcu so that no memory has to
 * be allocated in this recovery path; the cqr is rebuilt from scratch
 * on each use. Returns the result of dasd_sleep_on_immediatly.
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	memcpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = 16;
	ccw->cda = virt_to_dma32(cqr->data);
	/* first data byte carries the reason code of the summary unit check */
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
  685. static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
  686. {
  687. struct alias_pav_group *pavgroup;
  688. struct dasd_device *device;
  689. struct dasd_eckd_private *private;
  690. /* active and inactive list can contain alias as well as base devices */
  691. list_for_each_entry(device, &lcu->active_devices, alias_list) {
  692. private = device->private;
  693. if (private->uid.type != UA_BASE_DEVICE)
  694. continue;
  695. dasd_schedule_block_bh(device->block);
  696. dasd_schedule_device_bh(device);
  697. }
  698. list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
  699. private = device->private;
  700. if (private->uid.type != UA_BASE_DEVICE)
  701. continue;
  702. dasd_schedule_block_bh(device->block);
  703. dasd_schedule_device_bh(device);
  704. }
  705. list_for_each_entry(pavgroup, &lcu->grouplist, group) {
  706. list_for_each_entry(device, &pavgroup->baselist, alias_list) {
  707. dasd_schedule_block_bh(device->block);
  708. dasd_schedule_device_bh(device);
  709. }
  710. }
  711. }
/*
 * Flush the request queues of all alias devices on the lcu and move
 * them to the lcu's active list (detached from their pav groups).
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		/* may sleep - must run without the lcu lock */
		dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list)) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
  759. static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
  760. {
  761. struct alias_pav_group *pavgroup;
  762. struct dasd_device *device;
  763. list_for_each_entry(device, &lcu->active_devices, alias_list) {
  764. spin_lock(get_ccwdev_lock(device->cdev));
  765. dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
  766. spin_unlock(get_ccwdev_lock(device->cdev));
  767. }
  768. list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
  769. spin_lock(get_ccwdev_lock(device->cdev));
  770. dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
  771. spin_unlock(get_ccwdev_lock(device->cdev));
  772. }
  773. list_for_each_entry(pavgroup, &lcu->grouplist, group) {
  774. list_for_each_entry(device, &pavgroup->baselist, alias_list) {
  775. spin_lock(get_ccwdev_lock(device->cdev));
  776. dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
  777. spin_unlock(get_ccwdev_lock(device->cdev));
  778. }
  779. list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
  780. spin_lock(get_ccwdev_lock(device->cdev));
  781. dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
  782. spin_unlock(get_ccwdev_lock(device->cdev));
  783. }
  784. }
  785. }
  786. static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
  787. {
  788. struct alias_pav_group *pavgroup;
  789. struct dasd_device *device;
  790. list_for_each_entry(device, &lcu->active_devices, alias_list) {
  791. spin_lock(get_ccwdev_lock(device->cdev));
  792. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  793. spin_unlock(get_ccwdev_lock(device->cdev));
  794. }
  795. list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
  796. spin_lock(get_ccwdev_lock(device->cdev));
  797. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  798. spin_unlock(get_ccwdev_lock(device->cdev));
  799. }
  800. list_for_each_entry(pavgroup, &lcu->grouplist, group) {
  801. list_for_each_entry(device, &pavgroup->baselist, alias_list) {
  802. spin_lock(get_ccwdev_lock(device->cdev));
  803. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  804. spin_unlock(get_ccwdev_lock(device->cdev));
  805. }
  806. list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
  807. spin_lock(get_ccwdev_lock(device->cdev));
  808. dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
  809. spin_unlock(get_ccwdev_lock(device->cdev));
  810. }
  811. }
  812. }
/*
 * Worker that performs summary unit check recovery for an lcu:
 * flush all alias devices, reset the summary unit check on the
 * reporting device, restart the base devices and finally schedule a
 * re-read of the alias configuration. Drops the device reference
 * taken when the work was scheduled.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* the reporting device itself must be startable for the RSCK I/O */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	lcu->suc_data.device = NULL;
	dasd_put_device(device);
	spin_unlock_irqrestore(&lcu->lock, flags);
}
  840. void dasd_alias_handle_summary_unit_check(struct work_struct *work)
  841. {
  842. struct dasd_device *device = container_of(work, struct dasd_device,
  843. suc_work);
  844. struct dasd_eckd_private *private = device->private;
  845. struct alias_lcu *lcu;
  846. unsigned long flags;
  847. lcu = private->lcu;
  848. if (!lcu) {
  849. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  850. "device not ready to handle summary"
  851. " unit check (no lcu structure)");
  852. goto out;
  853. }
  854. spin_lock_irqsave(&lcu->lock, flags);
  855. /* If this device is about to be removed just return and wait for
  856. * the next interrupt on a different device
  857. */
  858. if (list_empty(&device->alias_list)) {
  859. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  860. "device is in offline processing,"
  861. " don't do summary unit check handling");
  862. goto out_unlock;
  863. }
  864. if (lcu->suc_data.device) {
  865. /* already scheduled or running */
  866. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  867. "previous instance of summary unit check worker"
  868. " still pending");
  869. goto out_unlock;
  870. }
  871. _stop_all_devices_on_lcu(lcu);
  872. /* prepare for lcu_update */
  873. lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
  874. lcu->suc_data.reason = private->suc_reason;
  875. lcu->suc_data.device = device;
  876. dasd_get_device(device);
  877. if (!schedule_work(&lcu->suc_data.worker))
  878. dasd_put_device(device);
  879. out_unlock:
  880. spin_unlock_irqrestore(&lcu->lock, flags);
  881. out:
  882. clear_bit(DASD_FLAG_SUC, &device->flags);
  883. dasd_put_device(device);
  884. };