/* share.c */
/*
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 *          and Philip Blundell
 *
 * Any part of this program may be used in documents licensed under
 * the GNU Free Documentation License, Version 1.1 or any later version
 * published by the Free Software Foundation.
 */
#undef PARPORT_DEBUG_SHARING /* undef for production */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <asm/irq.h>

#undef PARPORT_PARANOID

/* Default length of a device's timeslice on a shared port. */
#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* List of announced ports, protected by parportlist_lock. */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* list of all allocated ports, sorted by ->number */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Serialises driver attach/detach against port (un)registration. */
static DEFINE_MUTEX(registration_lock);
/*
 * What you can do to a port that's gone away..
 *
 * These no-op stubs back dead_ops below; they are installed by
 * parport_remove_port() so drivers still holding the port call
 * harmless functions instead of touching vanished hardware.
 */
static void dead_write_lines(struct parport *p, unsigned char b){}
static unsigned char dead_read_lines(struct parport *p) { return 0; }
static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg(struct parport *p){}
static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
static void dead_state(struct parport *p, struct parport_state *s) { }
static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read(struct parport *p, void *b, size_t l, int f)
{ return 0; }
/*
 * Null operations table; parport_remove_port() points port->ops here
 * so every low-level operation on a removed port becomes a no-op.
 */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
/* Device type used to tag port devices (vs. pardevices) on the bus. */
static struct device_type parport_device_type = {
	.name = "parport",
};
/* Non-zero iff @dev is a port (carries parport_device_type). */
static int is_parport(struct device *dev)
{
	return dev->type == &parport_device_type;
}
  89. static int parport_probe(struct device *dev)
  90. {
  91. struct parport_driver *drv;
  92. if (is_parport(dev))
  93. return -ENODEV;
  94. drv = to_parport_driver(dev->driver);
  95. if (!drv->probe) {
  96. /* if driver has not defined a custom probe */
  97. struct pardevice *par_dev = to_pardevice(dev);
  98. if (strcmp(par_dev->name, drv->name))
  99. return -ENODEV;
  100. return 0;
  101. }
  102. /* if driver defined its own probe */
  103. return drv->probe(to_pardevice(dev));
  104. }
/* The parport bus: hosts both port devices and pardevices. */
static const struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};
/* Register the parport bus with the driver core; returns 0 or -errno. */
int parport_bus_init(void)
{
	return bus_register(&parport_bus_type);
}
/* Tear down the parport bus registered by parport_bus_init(). */
void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}
  117. /*
  118. * iterates through all the drivers registered with the bus and sends the port
  119. * details to the match_port callback of the driver, so that the driver can
  120. * know about the new port that just registered with the bus and decide if it
  121. * wants to use this new port.
  122. */
  123. static int driver_check(struct device_driver *dev_drv, void *_port)
  124. {
  125. struct parport *port = _port;
  126. struct parport_driver *drv = to_parport_driver(dev_drv);
  127. if (drv->match_port)
  128. drv->match_port(port);
  129. return 0;
  130. }
/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */

	/*
	 * call the driver_check function of the drivers registered in
	 * new device model
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}
  141. static int driver_detach(struct device_driver *_drv, void *_port)
  142. {
  143. struct parport *port = _port;
  144. struct parport_driver *drv = to_parport_driver(_drv);
  145. if (drv->detach)
  146. drv->detach(port);
  147. return 0;
  148. }
/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */

	/*
	 * call the detach function of the drivers registered in
	 * new device model
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver(void)
{
	/*
	 * There is no actual module called this: you should set
	 * up an alias for modutils.
	 */
	request_module("parport_lowlevel");
}
/*
 * iterates through all the devices connected to the bus and sends the device
 * details to the match_port callback of the driver, so that the driver can
 * know what are all the ports that are connected to the bus and choose the
 * port to which it wants to register its device.
 */
static int port_check(struct device *dev, void *dev_drv)
{
	struct parport_driver *drv = dev_drv;

	/* only send ports, do not send other devices connected to bus */
	if (is_parport(dev))
		drv->match_port(to_parport_dev(dev));
	return 0;
}
  182. /*
  183. * Iterates through all the devices connected to the bus and return 1
  184. * if the device is a parallel port.
  185. */
  186. static int port_detect(struct device *dev, void *dev_drv)
  187. {
  188. if (is_parport(dev))
  189. return 1;
  190. return 0;
  191. }
/**
 *	__parport_register_driver - register a parallel port device driver
 *	@drv: structure describing the driver
 *	@owner: owner module of drv
 *	@mod_name: module name string
 *
 *	This can be called by a parallel port device driver in order
 *	to receive notifications about ports being found in the
 *	system, as well as ports no longer available.
 *
 *	If devmodel is true then the new device model is used
 *	for registration.
 *
 *	The @drv structure is allocated by the caller and must not be
 *	deallocated until after calling parport_unregister_driver().
 *
 *	If using the non device model:
 *	The driver's attach() function may block.  The port that
 *	attach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.  Calling
 *	parport_register_device() on that port will do this for you.
 *
 *	The driver's detach() function may block.  The port that
 *	detach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.
 *
 *	Returns 0 on success. The non device model will always succeed,
 *	but the new device model can fail and will return the error code.
 **/
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
			      const char *mod_name)
{
	/* using device model */
	int ret;

	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &parport_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	ret = driver_register(&drv->driver);
	if (ret)
		return ret;

	/*
	 * check if bus has any parallel port registered, if
	 * none is found then load the lowlevel driver.
	 */
	ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
			       port_detect);
	if (!ret)
		get_lowlevel_driver();

	/* Offer every port already on the bus to the new driver. */
	mutex_lock(&registration_lock);
	if (drv->match_port)
		bus_for_each_dev(&parport_bus_type, NULL, drv,
				 port_check);
	mutex_unlock(&registration_lock);

	return 0;
}
EXPORT_SYMBOL(__parport_register_driver);
  253. static int port_detach(struct device *dev, void *_drv)
  254. {
  255. struct parport_driver *drv = _drv;
  256. if (is_parport(dev) && drv->detach)
  257. drv->detach(to_parport_dev(dev));
  258. return 0;
  259. }
/**
 *	parport_unregister_driver - deregister a parallel port device driver
 *	@drv: structure describing the driver that was given to
 *	      parport_register_driver()
 *
 *	This should be called by a parallel port device driver that
 *	has registered itself using parport_register_driver() when it
 *	is about to be unloaded.
 *
 *	When it returns, the driver's attach() routine will no longer
 *	be called, and for each port that attach() was called for, the
 *	detach() routine will have been called.
 *
 *	All the driver's attach() and detach() calls are guaranteed to have
 *	finished by the time this function returns.
 **/
void parport_unregister_driver(struct parport_driver *drv)
{
	/* registration_lock excludes concurrent port (un)registration,
	 * so no new attach/detach can race with this teardown. */
	mutex_lock(&registration_lock);
	bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
	driver_unregister(&drv->driver);
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);
  284. static void free_port(struct device *dev)
  285. {
  286. int d;
  287. struct parport *port = to_parport_dev(dev);
  288. spin_lock(&full_list_lock);
  289. list_del(&port->full_list);
  290. spin_unlock(&full_list_lock);
  291. for (d = 0; d < 5; d++) {
  292. kfree(port->probe_info[d].class_name);
  293. kfree(port->probe_info[d].mfr);
  294. kfree(port->probe_info[d].model);
  295. kfree(port->probe_info[d].cmdset);
  296. kfree(port->probe_info[d].description);
  297. }
  298. kfree(port);
  299. }
  300. /**
  301. * parport_get_port - increment a port's reference count
  302. * @port: the port
  303. *
  304. * This ensures that a struct parport pointer remains valid
  305. * until the matching parport_put_port() call.
  306. **/
  307. struct parport *parport_get_port(struct parport *port)
  308. {
  309. struct device *dev = get_device(&port->bus_dev);
  310. return to_parport_dev(dev);
  311. }
  312. EXPORT_SYMBOL(parport_get_port);
/*
 * Remove the port's device from the bus.  The structure itself is
 * freed by free_port() once the last reference is dropped (bus_dev's
 * release callback, set in parport_register_port()).
 */
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);
/**
 *	parport_put_port - decrement a port's reference count
 *	@port: the port
 *
 *	This should be called once for each call to parport_get_port(),
 *	once the port is no longer needed. When the reference count reaches
 *	zero (port is no longer used), free_port is called.
 **/
void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);
/**
 *	parport_register_port - register a parallel port
 *	@base: base I/O address
 *	@irq: IRQ line
 *	@dma: DMA channel
 *	@ops: pointer to the port driver's port operations structure
 *
 *	When a parallel port (lowlevel) driver finds a port that
 *	should be made available to parallel port device drivers, it
 *	should call parport_register_port().  The @base, @irq, and
 *	@dma parameters are for the convenience of port drivers, and
 *	for ports where they aren't meaningful needn't be set to
 *	anything special.  They can be altered afterwards by adjusting
 *	the relevant members of the parport structure that is returned
 *	and represents the port.  They should not be tampered with
 *	after calling parport_announce_port, however.
 *
 *	If there are parallel port device drivers in the system that
 *	have registered themselves using parport_register_driver(),
 *	they are not told about the port at this time; that is done by
 *	parport_announce_port().
 *
 *	The @ops structure is allocated by the caller, and must not be
 *	deallocated before calling parport_remove_port().
 *
 *	If there is no memory to allocate a new parport structure,
 *	this function will return %NULL.
 **/
struct parport *parport_register_port(unsigned long base, int irq, int dma,
				      struct parport_operations *ops)
{
	struct list_head *l;
	struct parport *tmp;
	int num;
	int device;
	int ret;

	tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
	if (!tmp)
		return NULL;

	/* Init our structure */
	tmp->base = base;
	tmp->irq = irq;
	tmp->dma = dma;
	/* -1: not muxed / not daisy-chained until discovered otherwise */
	tmp->muxport = tmp->daisy = tmp->muxsel = -1;
	INIT_LIST_HEAD(&tmp->list);
	tmp->ops = ops;
	tmp->physport = tmp;
	rwlock_init(&tmp->cad_lock);
	spin_lock_init(&tmp->waitlist_lock);
	spin_lock_init(&tmp->pardevice_lock);
	tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
	tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
	sema_init(&tmp->ieee1284.irq, 0);
	tmp->spintime = parport_default_spintime;
	atomic_set(&tmp->ref_count, 1);

	/* Search for the lowest free parport number. */
	spin_lock(&full_list_lock);
	num = 0;
	list_for_each(l, &all_ports) {
		struct parport *p = list_entry(l, struct parport, full_list);

		/* all_ports is sorted by ->number, so the first gap in
		 * the sequence is the lowest free number. */
		if (p->number != num++)
			break;
	}
	tmp->portnum = tmp->number = num;
	/* Insert before l to keep all_ports sorted. */
	list_add_tail(&tmp->full_list, l);
	spin_unlock(&full_list_lock);

	/*
	 * Now that the portnum is known finish doing the Init.
	 */
	dev_set_name(&tmp->bus_dev, "parport%d", tmp->portnum);
	tmp->bus_dev.bus = &parport_bus_type;
	tmp->bus_dev.release = free_port;
	tmp->bus_dev.type = &parport_device_type;

	/* ->name just aliases the device name set above */
	tmp->name = dev_name(&tmp->bus_dev);

	for (device = 0; device < 5; device++)
		/* assume the worst */
		tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;

	ret = device_register(&tmp->bus_dev);
	if (ret) {
		/* drop the initial ref so free_port() cleans up */
		put_device(&tmp->bus_dev);
		return NULL;
	}

	return tmp;
}
EXPORT_SYMBOL(parport_register_port);
/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/
void parport_announce_port(struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		pr_warn("%s: fix this legacy no-device port driver!\n",
			port->name);

	parport_proc_register(port);

	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* slaves[0..1]: up to two dependent ports announced along with
	 * this one (presumably mux members — see parport.h for their
	 * exact semantics) */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_announce_port);
/**
 *	parport_remove_port - deregister a parallel port
 *	@port: parallel port to deregister
 *
 *	When a parallel port driver is forcibly unloaded, or a
 *	parallel port becomes inaccessible, the port driver must call
 *	this function in order to deal with device drivers that still
 *	want to use it.
 *
 *	The parport structure associated with the port has its
 *	operations structure replaced with one containing 'null'
 *	operations that return errors or just don't do anything.
 *
 *	Any drivers that have registered themselves using
 *	parport_register_driver() are notified that the port is no
 *	longer accessible by having their detach() routines called
 *	with @port as the parameter.
 **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain(port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* From here on any low-level call lands in harmless stubs. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the slave references (presumably taken where the slave
	 * ports were created — confirm in the mux code). */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
EXPORT_SYMBOL(parport_remove_port);
/*
 * Release callback for a pardevice's struct device (set in
 * parport_register_dev_model()); frees the name allocated with
 * kstrdup_const() and the structure itself.
 */
static void free_pardevice(struct device *dev)
{
	struct pardevice *par_dev = to_pardevice(dev);

	kfree_const(par_dev->name);
	kfree(par_dev);
}
/**
 *	parport_register_dev_model - register a device on a parallel port
 *	@port: port to which the device is attached
 *	@name: a name to refer to the device
 *	@par_dev_cb: struct containing callbacks
 *	@id: device number to be given to the device
 *
 *	This function, called by parallel port device drivers,
 *	declares that a device is connected to a port, and tells the
 *	system all it needs to know.
 *
 *	The struct pardev_cb contains pointer to callbacks. preemption
 *	callback function, @preempt, is called when this device driver
 *	has claimed access to the port but another device driver wants
 *	to use it.  It is given, @private, as its parameter, and should
 *	return zero if it is willing for the system to release the port
 *	to another driver on its behalf. If it wants to keep control of
 *	the port it should return non-zero, and no action will be taken.
 *	It is good manners for the driver to try to release the port at
 *	the earliest opportunity after its preemption callback rejects a
 *	preemption attempt.  Note that if a preemption callback is happy
 *	for preemption to go ahead, there is no need to release the
 *	port; it is done automatically.  This function may not block, as
 *	it may be called from interrupt context.  If the device driver
 *	does not support preemption, @preempt can be %NULL.
 *
 *	The wake-up ("kick") callback function, @wakeup, is called when
 *	the port is available to be claimed for exclusive access; that
 *	is, parport_claim() is guaranteed to succeed when called from
 *	inside the wake-up callback function.  If the driver wants to
 *	claim the port it should do so; otherwise, it need not take
 *	any action.  This function may not block, as it may be called
 *	from interrupt context.  If the device driver does not want to
 *	be explicitly invited to claim the port in this way, @wakeup can
 *	be %NULL.
 *
 *	The interrupt handler, @irq_func, is called when an interrupt
 *	arrives from the parallel port.  Note that if a device driver
 *	wants to use interrupts it should use parport_enable_irq(),
 *	and can also check the irq member of the parport structure
 *	representing the port.
 *
 *	The parallel port (lowlevel) driver is the one that has called
 *	request_irq() and whose interrupt handler is called first.
 *	This handler does whatever needs to be done to the hardware to
 *	acknowledge the interrupt (for PC-style ports there is nothing
 *	special to be done).  It then tells the IEEE 1284 code about
 *	the interrupt, which may involve reacting to an IEEE 1284
 *	event depending on the current IEEE 1284 phase.  After this,
 *	it calls @irq_func.  Needless to say, @irq_func will be called
 *	from interrupt context, and may not block.
 *
 *	The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
 *	so should only be used when sharing the port with other device
 *	drivers is impossible and would lead to incorrect behaviour.
 *	Use it sparingly!  Normally, @flags will be zero.
 *
 *	This function returns a pointer to a structure that represents
 *	the device on the port, or %NULL if there is not enough memory
 *	to allocate space for that structure.
 **/
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int id)
{
	struct pardevice *par_dev;
	const char *devname;
	int ret;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		pr_err("%s: no more devices allowed\n", port->name);
		return NULL;
	}

	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
		/* A lurking device needs both callbacks to be useful. */
		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
				port->name, name);
			return NULL;
		}
	}

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	if (!try_module_get(port->ops->owner))
		return NULL;

	/* Hold the port for as long as the device exists. */
	parport_get_port(port);

	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
	if (!par_dev)
		goto err_put_port;

	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
	if (!par_dev->state)
		goto err_put_par_dev;

	devname = kstrdup_const(name, GFP_KERNEL);
	if (!devname)
		goto err_free_par_dev;

	par_dev->name = devname;
	par_dev->port = port;
	par_dev->daisy = -1;
	par_dev->preempt = par_dev_cb->preempt;
	par_dev->wakeup = par_dev_cb->wakeup;
	par_dev->private = par_dev_cb->private;
	par_dev->flags = par_dev_cb->flags;
	par_dev->irq_func = par_dev_cb->irq_func;
	par_dev->waiting = 0;
	par_dev->timeout = 5 * HZ;

	par_dev->dev.parent = &port->bus_dev;
	par_dev->dev.bus = &parport_bus_type;
	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
	if (ret)
		goto err_free_devname;
	par_dev->dev.release = free_pardevice;
	par_dev->devmodel = true;
	ret = device_register(&par_dev->dev);
	if (ret) {
		/* release callback only frees name+struct; state is ours */
		kfree(par_dev->state);
		put_device(&par_dev->dev);
		goto err_put_port;
	}

	/* Chain this onto the list */
	par_dev->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		/* Re-check under the lock: a racing registration may
		 * have added a device since the unlocked check above. */
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			pr_debug("%s: cannot grant exclusive access for device %s\n",
				 port->name, name);
			kfree(par_dev->state);
			device_unregister(&par_dev->dev);
			goto err_put_port;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	par_dev->next = port->physport->devices;
	wmb();	/*
		 * Make sure that tmp->next is written before it's
		 * added to the list; see comments marked 'no locking
		 * required'
		 */
	if (port->physport->devices)
		port->physport->devices->prev = par_dev;
	port->physport->devices = par_dev;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&par_dev->wait_q);
	par_dev->timeslice = parport_default_timeslice;
	par_dev->waitnext = NULL;
	par_dev->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(par_dev, par_dev->state);
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = par_dev;
		parport_device_proc_register(par_dev);
	}
	return par_dev;

	/* Unwind in reverse order of acquisition. */
err_free_devname:
	kfree_const(devname);
err_free_par_dev:
	kfree(par_dev->state);
err_put_par_dev:
	/* devmodel is still false here (set just before device_register),
	 * so par_dev has not been handed to the device core: free it. */
	if (!par_dev->devmodel)
		kfree(par_dev);
err_put_port:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
EXPORT_SYMBOL(parport_register_dev_model);
/**
 *	parport_unregister_device - deregister a device on a parallel port
 *	@dev: pointer to structure representing device
 *
 *	This undoes the effect of parport_register_device().
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		pr_err("%s: passed NULL\n", __func__);
		return;
	}
#endif

	port = dev->port->physport;

	/* Drop the /proc entry if this device owns it. */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	/* A well-behaved driver releases the port before unregistering. */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release(dev);
	}

	/* Unlink from the port's doubly-linked device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	/* dev itself (and its name) are freed via free_pardevice() */
	device_unregister(&dev->dev);

	module_put(port->ops->owner);
	parport_put_port(port);
}
EXPORT_SYMBOL(parport_unregister_device);
  757. /**
  758. * parport_find_number - find a parallel port by number
  759. * @number: parallel port number
  760. *
  761. * This returns the parallel port with the specified number, or
  762. * %NULL if there is none.
  763. *
  764. * There is an implicit parport_get_port() done already; to throw
  765. * away the reference to the port that parport_find_number()
  766. * gives you, use parport_put_port().
  767. */
  768. struct parport *parport_find_number(int number)
  769. {
  770. struct parport *port, *result = NULL;
  771. if (list_empty(&portlist))
  772. get_lowlevel_driver();
  773. spin_lock(&parportlist_lock);
  774. list_for_each_entry(port, &portlist, list) {
  775. if (port->number == number) {
  776. result = parport_get_port(port);
  777. break;
  778. }
  779. }
  780. spin_unlock(&parportlist_lock);
  781. return result;
  782. }
  783. EXPORT_SYMBOL(parport_find_number);
  784. /**
  785. * parport_find_base - find a parallel port by base address
  786. * @base: base I/O address
  787. *
  788. * This returns the parallel port with the specified base
  789. * address, or %NULL if there is none.
  790. *
  791. * There is an implicit parport_get_port() done already; to throw
  792. * away the reference to the port that parport_find_base()
  793. * gives you, use parport_put_port().
  794. */
  795. struct parport *parport_find_base(unsigned long base)
  796. {
  797. struct parport *port, *result = NULL;
  798. if (list_empty(&portlist))
  799. get_lowlevel_driver();
  800. spin_lock(&parportlist_lock);
  801. list_for_each_entry(port, &portlist, list) {
  802. if (port->base == base) {
  803. result = parport_get_port(port);
  804. break;
  805. }
  806. }
  807. spin_unlock(&parportlist_lock);
  808. return result;
  809. }
  810. EXPORT_SYMBOL(parport_find_base);
/**
 * parport_claim - claim access to a parallel port device
 * @dev: pointer to structure representing a device on the port
 *
 * This function will not block and so can be used from interrupt
 * context. If parport_claim() succeeds in claiming access to
 * the port it returns zero and the port is available to use. It
 * may fail (returning non-zero) if the port is in use by another
 * driver and that driver is not willing to relinquish control of
 * the port.
 **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	/* Re-claiming a port we already own is a harmless no-op. */
	if (port->cad == dev) {
		pr_info("%s: %s already owner\n", dev->port->name, dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		if (oldcad->preempt) {
			/* Non-zero from ->preempt means the owner refuses
			 * to give up the port. */
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;	/* owner cannot be preempted at all */

		if (port->cad != oldcad) {
			/*
			 * I think we'll actually deadlock rather than
			 * get here, but just in case..
			 */
			pr_warn("%s: %s released port when preempted!\n",
				port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again. */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select(port, dev->daisy,
					  IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * If this is the first time we tried to claim the port, register an
	 * interest. This is only allowed for devices sleeping in
	 * parport_claim_or_block(), or those with a wakeup function.
	 */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		/* Bit 0 of dev->waiting == "on the wait list"; only enqueue
		 * once even if we block repeatedly. */
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(parport_claim);
/**
 * parport_claim_or_block - claim access to a parallel port device
 * @dev: pointer to structure representing a device on the port
 *
 * This behaves like parport_claim(), but will block if necessary
 * to wait for the port to be free. A return value of 1
 * indicates that it slept; 0 means that it succeeded without
 * needing to sleep. A negative error code indicates failure.
 **/
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/*
	 * Signal to parport_claim() that we can wait even without a
	 * wakeup function.
	 */
	dev->waiting = 2;

	/* Try to claim the port. If this fails, we need to sleep. */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
		       dev->name);
#endif
		/*
		 * FIXME!!! Use the proper locking for dev->waiting,
		 * and make this use the "wait_event_interruptible()"
		 * interfaces. The cli/sti that used to be here
		 * did nothing.
		 *
		 * See also parport_release()
		 */

		/*
		 * If dev->waiting is clear now, an interrupt
		 * gave us the port and we would deadlock if we slept.
		 */
		if (dev->waiting) {
			/* parport_release() clears dev->waiting and wakes
			 * this queue when the port is handed to us. */
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			if (signal_pending(current))
				return -EINTR;
			r = 1;	/* we slept */
		} else {
			r = 0;	/* claimed from interrupt before we slept */
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
			       dev->name, dev->port->physport->cad ?
			       dev->port->physport->cad->name : "nobody");
#endif
	}
	dev->waiting = 0;
	return r;
}
EXPORT_SYMBOL(parport_claim_or_block);
/**
 * parport_release - give up access to a parallel port device
 * @dev: pointer to structure representing parallel port device
 *
 * This function cannot fail, but it should not be called without
 * the port claimed. Similarly, if the port is already claimed
 * you should not try claiming it again.
 **/
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		pr_warn("%s: %s tried to release parport when not owner\n",
			port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find out who's been there longest and
	 * then wake them up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Hand the port directly to the sleeper, then wake
			 * it; parport_claim() clears pd->waiting. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			/* Let the waiter claim the port from its callback. */
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			pr_err("%s: don't know how to wake %s\n",
			       port->name, pd->name);
		}
	}

	/*
	 * Nobody was waiting, so walk the list to see if anyone is
	 * interested in being woken up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	/* Stop as soon as some wakeup callback claims the port. */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
EXPORT_SYMBOL(parport_release);
  1042. irqreturn_t parport_irq_handler(int irq, void *dev_id)
  1043. {
  1044. struct parport *port = dev_id;
  1045. parport_generic_irq(port);
  1046. return IRQ_HANDLED;
  1047. }
  1048. EXPORT_SYMBOL(parport_irq_handler);
  1049. MODULE_DESCRIPTION("Parallel-port resource manager");
  1050. MODULE_LICENSE("GPL");