// SPDX-License-Identifier: GPL-2.0-only
/* sunvdc.c: Sun LDOM Virtual Disk Client.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.2"
#define DRV_MODULE_RELDATE "November 24, 2014"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VDC_TX_RING_SIZE 512
#define VDC_DEFAULT_BLK_SIZE 512

#define MAX_XFER_BLKS (128 * 1024)
#define MAX_XFER_SIZE (MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE)
#define MAX_RING_COOKIES ((MAX_XFER_BLKS / PAGE_SIZE) + 2)

#define WAITING_FOR_LINK_UP 0x01
#define WAITING_FOR_TX_SPACE 0x02
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1

#define VDC_MAX_RETRIES 10

static struct workqueue_struct *sunvdc_wq;

struct vdc_req_entry {
	struct request *req;
};

struct vdc_port {
	struct vio_driver_state vio;
	struct gendisk *disk;
	struct vdc_completion *cmp;
	u64 req_id;
	u64 seq;
	struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE];
	unsigned long ring_cookies;
	u64 max_xfer_size;
	u32 vdisk_block_size;
	u32 drain;
	u64 ldc_timeout;
	struct delayed_work ldc_reset_timer_work;
	struct work_struct ldc_reset_work;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64 operations;
	u32 vdisk_size;
	u8 vdisk_type;
	u8 vdisk_mtype;
	u32 vdisk_phys_blksz;

	struct blk_mq_tag_set tag_set;

	char disk_name[32];
};

static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
static void vdc_ldc_reset_timer_work(struct work_struct *work);

static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}

/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 2 },
	{ .major = 1, .minor = 1 },
	{ .major = 1, .minor = 0 },
};

static inline int vdc_version_supported(struct vdc_port *port,
					u16 major, u16 minor)
{
	return port->vio.ver.major == major && port->vio.ver.minor >= minor;
}

#define VDCBLK_NAME "vdisk"
static int vdc_major;
#define PARTITION_SHIFT 3

static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}

static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t nsect = get_capacity(disk);
	sector_t cylinders = nsect;

	geo->heads = 0xff;
	geo->sectors = 0x3f;
	sector_div(cylinders, geo->heads * geo->sectors);
	geo->cylinders = cylinders;
	if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
		geo->cylinders = 0xffff;

	return 0;
}

/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
 * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
 * Needed to be able to install inside an ldom from an iso image.
 */
static int vdc_ioctl(struct block_device *bdev, blk_mode_t mode,
		     unsigned command, unsigned long argument)
{
	struct vdc_port *port = bdev->bd_disk->private_data;
	int i;

	switch (command) {
	case CDROMMULTISESSION:
		pr_debug(PFX "Multisession CDs not supported\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY:
		if (!vdc_version_supported(port, 1, 1))
			return -EINVAL;

		switch (port->vdisk_mtype) {
		case VD_MEDIA_TYPE_CD:
		case VD_MEDIA_TYPE_DVD:
			return 0;
		default:
			return -EINVAL;
		}
	default:
		pr_debug(PFX "ioctl %08x not supported\n", command);
		return -EINVAL;
	}
}

static const struct block_device_operations vdc_fops = {
	.owner = THIS_MODULE,
	.getgeo = vdc_getgeo,
	.ioctl = vdc_ioctl,
	.compat_ioctl = blkdev_compat_ptr_ioctl,
};

static void vdc_blk_queue_start(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* restart blk queue when ring is half emptied. also called after
	 * handshake completes, so check for initial handshake before we've
	 * allocated a disk.
	 */
	if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
		blk_mq_start_stopped_hw_queues(port->disk->queue, true);
}

static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
	if (vio->cmp &&
	    (waiting_for == -1 ||
	     vio->cmp->waiting_for == waiting_for)) {
		vio->cmp->err = err;
		complete(&vio->cmp->com);
		vio->cmp = NULL;
	}
}

static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);

	cancel_delayed_work(&port->ldc_reset_timer_work);
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
	vdc_blk_queue_start(port);
}

static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vdc_send_attr(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}

static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
	       "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
	       pkt->xfer_mode, pkt->vdisk_block_size,
	       pkt->max_xfer_size);

	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);
			return -ECONNRESET;
		}

		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased "
			       "%u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
			return -ECONNRESET;
		}

		port->operations = pkt->operations;
		port->vdisk_type = pkt->vdisk_type;
		if (vdc_version_supported(port, 1, 1)) {
			port->vdisk_size = pkt->vdisk_size;
			port->vdisk_mtype = pkt->vdisk_mtype;
		}
		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;

		port->vdisk_phys_blksz = VDC_DEFAULT_BLK_SIZE;
		if (vdc_version_supported(port, 1, 2))
			port->vdisk_phys_blksz = pkt->phys_block_size;

		return 0;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

		return -ECONNRESET;
	}
}

static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}

static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = vio_dring_next(dr, index);

	req = rqe->req;
	if (req == NULL) {
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	blk_mq_end_request(req, desc->status ? BLK_STS_IOERR : 0);

	vdc_blk_queue_start(port);
}

static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;

	if (unlikely(pkt->dring_ident != dr->ident ||
		     pkt->start_idx != pkt->end_idx ||
		     pkt->start_idx >= VDC_TX_RING_SIZE))
		return 0;

	vdc_end_one(port, dr, pkt->start_idx);

	return 0;
}

static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	/* XXX Implement me XXX */
	return 0;
}

static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET)) {
		vio_link_state_change(vio, event);
		queue_work(sunvdc_wq, &port->ldc_reset_work);
		goto out;
	}

	if (unlikely(event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		goto out;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		pr_warn(PFX "Unexpected LDC event %d\n", event);
		goto out;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
out:
	spin_unlock_irqrestore(&vio->lock, flags);
}
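
/* Kick the server by sending a VIO_DRING_DATA INFO message for the
 * current producer index, retrying with exponential backoff (capped at
 * 128us per attempt, up to VDC_MAX_RETRIES attempts) while the LDC
 * channel reports -EAGAIN.
 */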
static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = dr->prod,
		.end_idx = dr->prod,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VDC_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);

	if (err == -ENOTCONN)
		vdc_ldc_reset(port);
	return err;
}

static int __send_request(struct request *req)
{
	struct vdc_port *port = req->q->disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[MAX_RING_COOKIES];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES))
		return -EINVAL;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 0xff;
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;
	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = vio_dring_next(dr, dr->prod);
	}

	return err;
}

static blk_status_t vdc_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct vdc_port *port = hctx->queue->queuedata;
	struct vio_dring_state *dr;
	unsigned long flags;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	blk_mq_start_request(bd->rq);

	spin_lock_irqsave(&port->vio.lock, flags);

	/*
	 * Doing drain, just end the request in error
	 */
	if (unlikely(port->drain)) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return BLK_STS_IOERR;
	}

	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		blk_mq_stop_hw_queue(hctx);
		return BLK_STS_DEV_RESOURCE;
	}

	if (__send_request(bd->rq) < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return BLK_STS_IOERR;
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);
	return BLK_STS_OK;
}
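
/* Issue a non-block-I/O disk operation (cache flush, VTOC, geometry,
 * etc.) through the same TX descriptor ring and sleep on a completion
 * until the server's ACK is delivered via vdc_end_special().
 */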
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
	struct vio_dring_state *dr;
	struct vio_completion comp;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	unsigned long flags;
	int op_len, err;
	void *req_buf;

	if (!(((u64)1 << (u64)op) & port->operations))
		return -EOPNOTSUPP;

	switch (op) {
	case VD_OP_BREAD:
	case VD_OP_BWRITE:
	default:
		return -EINVAL;

	case VD_OP_FLUSH:
		op_len = 0;
		map_perm = 0;
		break;

	case VD_OP_GET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_SCSICMD:
		op_len = 16;
		map_perm = LDC_MAP_RW;
		break;

	case VD_OP_GET_DEVID:
		op_len = sizeof(struct vio_disk_devid);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_GET_EFI:
	case VD_OP_SET_EFI:
		return -EOPNOTSUPP;
	}

	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	op_len = (op_len + 7) & ~7;
	req_buf = kzalloc(op_len, GFP_KERNEL);
	if (!req_buf)
		return -ENOMEM;

	if (len > op_len)
		len = op_len;

	if (map_perm & LDC_MAP_R)
		memcpy(req_buf, buf, len);

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* XXX If we want to use this code generically we have to
	 * XXX handle TX ring exhaustion etc.
	 */
	desc = vio_dring_cur(dr);

	err = ldc_map_single(port->vio.lp, req_buf, op_len,
			     desc->cookies, port->ring_cookies,
			     map_perm);
	if (err < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		kfree(req_buf);
		return err;
	}

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	desc->slice = 0;
	desc->status = ~0;
	desc->offset = 0;
	desc->size = op_len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err >= 0) {
		port->req_id++;
		dr->prod = vio_dring_next(dr, dr->prod);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		wait_for_completion(&comp.com);
		err = comp.err;
	} else {
		port->vio.cmp = NULL;
		spin_unlock_irqrestore(&port->vio.lock, flags);
	}

	if (map_perm & LDC_MAP_W)
		memcpy(buf, req_buf, len);

	kfree(req_buf);

	return err;
}

static int vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;
}

static void vdc_free_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}
}

static int vdc_port_up(struct vdc_port *port)
{
	struct vio_completion comp;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);
	wait_for_completion(&comp.com);
	return comp.err;
}

static void vdc_port_down(struct vdc_port *port)
{
	ldc_disconnect(port->vio.lp);
	ldc_unbind(port->vio.lp);
	vdc_free_tx_ring(port);
	vio_ldc_free(&port->vio);
}

static const struct blk_mq_ops vdc_mq_ops = {
	.queue_rq = vdc_queue_rq,
};
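
/* Bring the port up, fetch the disk geometry when the negotiated
 * protocol predates v1.1, and register the gendisk with the block layer.
 */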
static int probe_disk(struct vdc_port *port)
{
	struct queue_limits lim = {
		.physical_block_size = port->vdisk_phys_blksz,
		.max_hw_sectors = port->max_xfer_size,
		/* Each segment in a request is up to an aligned page in size. */
		.seg_boundary_mask = PAGE_SIZE - 1,
		.max_segment_size = PAGE_SIZE,
		.max_segments = port->ring_cookies,
		.features = BLK_FEAT_ROTATIONAL,
	};
	struct request_queue *q;
	struct gendisk *g;
	int err;

	err = vdc_port_up(port);
	if (err)
		return err;

	/* Using version 1.2 means vdisk_phys_blksz should be set unless the
	 * disk is reserved by another system.
	 */
	if (vdc_version_supported(port, 1, 2) && !port->vdisk_phys_blksz)
		return -ENODEV;

	if (vdc_version_supported(port, 1, 1)) {
		/* vdisk_size should be set during the handshake, if it wasn't
		 * then the underlying disk is reserved by another system
		 */
		if (port->vdisk_size == -1)
			return -ENODEV;
	} else {
		struct vio_disk_geom geom;

		err = generic_request(port, VD_OP_GET_DISKGEOM,
				      &geom, sizeof(geom));
		if (err < 0) {
			printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
			       "error %d\n", err);
			return err;
		}
		port->vdisk_size = ((u64)geom.num_cyl *
				    (u64)geom.num_hd *
				    (u64)geom.num_sec);
	}

	err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
				      VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
	if (err)
		return err;

	g = blk_mq_alloc_disk(&port->tag_set, &lim, port);
	if (IS_ERR(g)) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		err = PTR_ERR(g);
		goto out_free_tag;
	}

	port->disk = g;
	q = g->queue;

	g->major = vdc_major;
	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
	g->minors = 1 << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;

	set_capacity(g, port->vdisk_size);

	if (vdc_version_supported(port, 1, 1)) {
		switch (port->vdisk_mtype) {
		case VD_MEDIA_TYPE_CD:
			pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_DVD:
			pr_info(PFX "Virtual DVD %s\n", port->disk_name);
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_FIXED:
			pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
			break;
		}
	}

	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
		g->disk_name,
		port->vdisk_size, (port->vdisk_size >> (20 - 9)),
		port->vio.ver.major, port->vio.ver.minor);

	err = device_add_disk(&port->vio.vdev->dev, g, NULL);
	if (err)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	put_disk(g);
out_free_tag:
	blk_mq_free_tag_set(&port->tag_set);
	return err;
}

static struct ldc_channel_config vdc_ldc_cfg = {
	.event = vdc_event,
	.mtu = 64,
	.mode = LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vdc_vio_ops = {
	.send_attr = vdc_send_attr,
	.handle_attr = vdc_handle_attr,
	.handshake_complete = vdc_handshake_complete,
};

static void print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

struct vdc_check_port_data {
	int dev_no;
	char *type;
};

static int vdc_device_probed(struct device *dev, void *arg)
{
	struct vio_dev *vdev = to_vio_dev(dev);
	struct vdc_check_port_data *port_data;

	port_data = (struct vdc_check_port_data *)arg;

	if ((vdev->dev_no == port_data->dev_no) &&
	    (!(strcmp((char *)&vdev->type, port_data->type))) &&
	    dev_get_drvdata(dev)) {
		/* This device has already been configured
		 * by vdc_port_probe()
		 */
		return 1;
	} else {
		return 0;
	}
}

/* Determine whether the VIO device is part of an mpgroup
 * by locating all the virtual-device-port nodes associated
 * with the parent virtual-device node for the VIO device
 * and checking whether any of these nodes are vdc-ports
 * which have already been configured.
 *
 * Returns true if this device is part of an mpgroup and has
 * already been probed.
 */
static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
{
	struct vdc_check_port_data port_data;
	struct device *dev;

	port_data.dev_no = vdev->dev_no;
	port_data.type = (char *)&vdev->type;

	dev = device_find_child(vdev->dev.parent, &port_data,
				vdc_device_probed);

	if (dev)
		return true;

	return false;
}

static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vdc_port *port;
	int err;
	const u64 *ldc_timeout;

	print_version();

	hp = mdesc_grab();
	if (!hp)
		return -ENODEV;

	err = -ENODEV;
	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
		       vdev->dev_no);
		goto err_out_release_mdesc;
	}

	/* Check if this device is part of an mpgroup */
	if (vdc_port_mpgroup_check(vdev)) {
		printk(KERN_WARNING
		       "VIO: Ignoring extra vdisk port %s",
		       dev_name(&vdev->dev));
		goto err_out_release_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		err = -ENOMEM;
		goto err_out_release_mdesc;
	}

	if (vdev->dev_no >= 26)
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c%c",
			 'a' + ((int)vdev->dev_no / 26) - 1,
			 'a' + ((int)vdev->dev_no % 26));
	else
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
	port->vdisk_size = -1;

	/* Actual wall time may be double due to do_generic_file_read() doing
	 * a readahead I/O first, and once that fails it will try to read a
	 * single page.
	 */
	ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
	port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
	INIT_DELAYED_WORK(&port->ldc_reset_timer_work, vdc_ldc_reset_timer_work);
	INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);

	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
			      vdc_versions, ARRAY_SIZE(vdc_versions),
			      &vdc_vio_ops, port->disk_name);
	if (err)
		goto err_out_free_port;

	port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
	port->max_xfer_size = MAX_XFER_SIZE;
	port->ring_cookies = MAX_RING_COOKIES;

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vdc_alloc_tx_ring(port);
	if (err)
		goto err_out_free_ldc;

	err = probe_disk(port);
	if (err)
		goto err_out_free_tx_ring;

	/* Note that the device driver_data is used to determine
	 * whether the port has been probed.
	 */
	dev_set_drvdata(&vdev->dev, port);

	mdesc_release(hp);

	return 0;

err_out_free_tx_ring:
	vdc_free_tx_ring(port);

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_release_mdesc:
	mdesc_release(hp);
	return err;
}

static void vdc_port_remove(struct vio_dev *vdev)
{
	struct vdc_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		blk_mq_stop_hw_queues(port->disk->queue);

		flush_work(&port->ldc_reset_work);
		cancel_delayed_work_sync(&port->ldc_reset_timer_work);
		del_timer_sync(&port->vio.timer);

		del_gendisk(port->disk);
		put_disk(port->disk);
		blk_mq_free_tag_set(&port->tag_set);

		vdc_free_tx_ring(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
}
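
/* After an LDC reset, walk the descriptors still outstanding between
 * cons and prod, unmap their cookies and hand the attached requests
 * back to blk-mq for requeueing.
 */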
static void vdc_requeue_inflight(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	u32 idx;

	for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
		struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
		struct vdc_req_entry *rqe = &port->rq_arr[idx];
		struct request *req;

		ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
		desc->hdr.state = VIO_DESC_FREE;
		dr->cons = vio_dring_next(dr, idx);

		req = rqe->req;
		if (req == NULL) {
			vdc_end_special(port, desc);
			continue;
		}

		rqe->req = NULL;
		blk_mq_requeue_request(req, false);
	}
}

static void vdc_queue_drain(struct vdc_port *port)
{
	struct request_queue *q = port->disk->queue;

	/*
	 * Mark the queue as draining, then freeze/quiesce to ensure
	 * that all existing requests are seen in ->queue_rq() and killed
	 */
	port->drain = 1;
	spin_unlock_irq(&port->vio.lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	spin_lock_irq(&port->vio.lock);
	port->drain = 0;
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}

static void vdc_ldc_reset_timer_work(struct work_struct *work)
{
	struct vdc_port *port;
	struct vio_driver_state *vio;

	port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
	vio = &port->vio;

	spin_lock_irq(&vio->lock);
	if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
		pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
			port->disk_name, port->ldc_timeout);
		vdc_queue_drain(port);
		vdc_blk_queue_start(port);
	}
	spin_unlock_irq(&vio->lock);
}

static void vdc_ldc_reset_work(struct work_struct *work)
{
	struct vdc_port *port;
	struct vio_driver_state *vio;
	unsigned long flags;

	port = container_of(work, struct vdc_port, ldc_reset_work);
	vio = &port->vio;

	spin_lock_irqsave(&vio->lock, flags);
	vdc_ldc_reset(port);
	spin_unlock_irqrestore(&vio->lock, flags);
}

static void vdc_ldc_reset(struct vdc_port *port)
{
	int err;

	assert_spin_locked(&port->vio.lock);

	pr_warn(PFX "%s ldc link reset\n", port->disk_name);
	blk_mq_stop_hw_queues(port->disk->queue);
	vdc_requeue_inflight(port);
	vdc_port_down(port);

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err) {
		pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
		return;
	}

	err = vdc_alloc_tx_ring(port);
	if (err) {
		pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
		goto err_free_ldc;
	}

	if (port->ldc_timeout)
		mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
				 round_jiffies(jiffies + HZ * port->ldc_timeout));
	mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
	return;

err_free_ldc:
	vio_ldc_free(&port->vio);
}

static const struct vio_device_id vdc_port_match[] = {
	{
		.type = "vdc-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);

static struct vio_driver vdc_port_driver = {
	.id_table = vdc_port_match,
	.probe = vdc_port_probe,
	.remove = vdc_port_remove,
	.name = "vdc_port",
};

static int __init vdc_init(void)
{
	int err;

	sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
	if (!sunvdc_wq)
		return -ENOMEM;

	err = register_blkdev(0, VDCBLK_NAME);
	if (err < 0)
		goto out_free_wq;

	vdc_major = err;

	err = vio_register_driver(&vdc_port_driver);
	if (err)
		goto out_unregister_blkdev;

	return 0;

out_unregister_blkdev:
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	vdc_major = 0;

out_free_wq:
	destroy_workqueue(sunvdc_wq);

	return err;
}

static void __exit vdc_exit(void)
{
	vio_unregister_driver(&vdc_port_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	destroy_workqueue(sunvdc_wq);
}

module_init(vdc_init);
module_exit(vdc_exit);