/* drivers/s390/crypto/ap_bus.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright IBM Corp. 2006, 2023
  4. * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  5. * Martin Schwidefsky <schwidefsky@de.ibm.com>
  6. * Ralph Wuerthner <rwuerthn@de.ibm.com>
  7. * Felix Beck <felix.beck@de.ibm.com>
  8. * Holger Dengler <hd@linux.vnet.ibm.com>
  9. * Harald Freudenberger <freude@linux.ibm.com>
  10. *
  11. * Adjunct processor bus.
  12. */
  13. #define KMSG_COMPONENT "ap"
  14. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  15. #include <linux/kernel_stat.h>
  16. #include <linux/moduleparam.h>
  17. #include <linux/init.h>
  18. #include <linux/delay.h>
  19. #include <linux/err.h>
  20. #include <linux/freezer.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/workqueue.h>
  23. #include <linux/slab.h>
  24. #include <linux/notifier.h>
  25. #include <linux/kthread.h>
  26. #include <linux/mutex.h>
  27. #include <asm/airq.h>
  28. #include <asm/tpi.h>
  29. #include <linux/atomic.h>
  30. #include <asm/isc.h>
  31. #include <linux/hrtimer.h>
  32. #include <linux/ktime.h>
  33. #include <asm/facility.h>
  34. #include <linux/crypto.h>
  35. #include <linux/mod_devicetable.h>
  36. #include <linux/debugfs.h>
  37. #include <linux/ctype.h>
  38. #include <linux/module.h>
  39. #include <asm/uv.h>
  40. #include <asm/chsc.h>
  41. #include "ap_bus.h"
  42. #include "ap_debug.h"
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver");
MODULE_LICENSE("GPL");

int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
static DEFINE_SPINLOCK(ap_domain_lock);
module_param_named(domain, ap_domain_index, int, 0440);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

/* Non-zero: use a dedicated polling kthread (see ap_poll_thread) */
static int ap_thread_flag;
module_param_named(poll_thread, ap_thread_flag, int, 0440);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");

static char *apm_str;
module_param_named(apmask, apm_str, charp, 0440);
MODULE_PARM_DESC(apmask, "AP bus adapter mask.");

static char *aqm_str;
module_param_named(aqmask, aqm_str, charp, 0440);
MODULE_PARM_DESC(aqmask, "AP bus domain mask.");

static int ap_useirq = 1;
module_param_named(useirq, ap_useirq, int, 0440);
MODULE_PARM_DESC(useirq, "Use interrupt if available, default is 1 (on).");

atomic_t ap_max_msg_size = ATOMIC_INIT(AP_DEFAULT_MAX_MSG_SIZE);
EXPORT_SYMBOL(ap_max_msg_size);

static struct device *ap_root_device;

/* Hashtable of all queue devices on the AP bus */
DEFINE_HASHTABLE(ap_queues, 8);
/* lock used for the ap_queues hashtable */
DEFINE_SPINLOCK(ap_queues_lock);

/* Default permissions (ioctl, card and domain masking) */
struct ap_perms ap_perms;
EXPORT_SYMBOL(ap_perms);
DEFINE_MUTEX(ap_perms_mutex);
EXPORT_SYMBOL(ap_perms_mutex);

/* # of bindings complete since init */
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);

/* completion for APQN bindings complete */
static DECLARE_COMPLETION(ap_apqn_bindings_complete);

/* qci[0] is the current QCI info, qci[1] the previous snapshot (see ap_init_qci_info) */
static struct ap_config_info qci[2];
static struct ap_config_info *const ap_qci_info = &qci[0];
static struct ap_config_info *const ap_qci_info_old = &qci[1];

/*
 * AP bus related debug feature things.
 */
debug_info_t *ap_dbf_info;

/*
 * AP bus rescan related things.
 */
static bool ap_scan_bus(void);
static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */
static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */
static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */
static int ap_scan_bus_time = AP_CONFIG_TIME;
static struct timer_list ap_scan_bus_timer;
static void ap_scan_bus_wq_callback(struct work_struct *);
static DECLARE_WORK(ap_scan_bus_work, ap_scan_bus_wq_callback);

/*
 * Tasklet & timer for AP request polling and interrupts
 */
static void ap_tasklet_fn(unsigned long);
static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static DEFINE_SPINLOCK(ap_poll_timer_lock);
static struct hrtimer ap_poll_timer;

/*
 * In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
 * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.
 */
static unsigned long poll_high_timeout = 250000UL;

/*
 * Some state machine states only require a low frequency polling.
 * We use 25 Hz frequency for these.
 */
static unsigned long poll_low_timeout = 40000000UL;

/* Maximum domain id, if not given via qci */
static int ap_max_domain_id = 15;
/* Maximum adapter id, if not given via qci */
static int ap_max_adapter_id = 63;

static const struct bus_type ap_bus_type;

/* Adapter interrupt definitions */
static void ap_interrupt_handler(struct airq_struct *airq,
				 struct tpi_info *tpi_info);

/* true if AP adapter interrupts are enabled and in use */
static bool ap_irq_flag;

static struct airq_struct ap_airq = {
	.handler = ap_interrupt_handler,
	.isc = AP_ISC,
};
  131. /**
  132. * ap_airq_ptr() - Get the address of the adapter interrupt indicator
  133. *
  134. * Returns the address of the local-summary-indicator of the adapter
  135. * interrupt handler for AP, or NULL if adapter interrupts are not
  136. * available.
  137. */
  138. void *ap_airq_ptr(void)
  139. {
  140. if (ap_irq_flag)
  141. return ap_airq.lsi_ptr;
  142. return NULL;
  143. }
  144. /**
  145. * ap_interrupts_available(): Test if AP interrupts are available.
  146. *
  147. * Returns 1 if AP interrupts are available.
  148. */
  149. static int ap_interrupts_available(void)
  150. {
  151. return test_facility(65);
  152. }
  153. /**
  154. * ap_qci_available(): Test if AP configuration
  155. * information can be queried via QCI subfunction.
  156. *
  157. * Returns 1 if subfunction PQAP(QCI) is available.
  158. */
  159. static int ap_qci_available(void)
  160. {
  161. return test_facility(12);
  162. }
  163. /**
  164. * ap_apft_available(): Test if AP facilities test (APFT)
  165. * facility is available.
  166. *
  167. * Returns 1 if APFT is available.
  168. */
  169. static int ap_apft_available(void)
  170. {
  171. return test_facility(15);
  172. }
  173. /*
  174. * ap_qact_available(): Test if the PQAP(QACT) subfunction is available.
  175. *
  176. * Returns 1 if the QACT subfunction is available.
  177. */
  178. static inline int ap_qact_available(void)
  179. {
  180. return ap_qci_info->qact;
  181. }
  182. /*
  183. * ap_sb_available(): Test if the AP secure binding facility is available.
  184. *
  185. * Returns 1 if secure binding facility is available.
  186. */
  187. int ap_sb_available(void)
  188. {
  189. return ap_qci_info->apsb;
  190. }
  191. /*
  192. * ap_is_se_guest(): Check for SE guest with AP pass-through support.
  193. */
  194. bool ap_is_se_guest(void)
  195. {
  196. return is_prot_virt_guest() && ap_sb_available();
  197. }
  198. EXPORT_SYMBOL(ap_is_se_guest);
/**
 * ap_init_qci_info(): Allocate and query qci config info.
 * Does also update the static variables ap_max_domain_id
 * and ap_max_adapter_id if this info is available.
 *
 * On success the fetched info is duplicated into ap_qci_info_old so
 * that later rescans can diff current against previous config.
 * If QCI is unavailable or the query fails, the defaults for
 * ap_max_adapter_id/ap_max_domain_id remain in effect.
 */
static void __init ap_init_qci_info(void)
{
	if (!ap_qci_available() ||
	    ap_qci(ap_qci_info)) {
		AP_DBF_INFO("%s QCI not supported\n", __func__);
		return;
	}
	/* keep a snapshot of the initial info for later comparison */
	memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
	AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);

	/* apxa: extended addressing facility - na/nd hold the real maxima */
	if (ap_qci_info->apxa) {
		if (ap_qci_info->na) {
			ap_max_adapter_id = ap_qci_info->na;
			AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
				    __func__, ap_max_adapter_id);
		}
		if (ap_qci_info->nd) {
			ap_max_domain_id = ap_qci_info->nd;
			AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
				    __func__, ap_max_domain_id);
		}
	}
}
  226. /*
  227. * ap_test_config(): helper function to extract the nrth bit
  228. * within the unsigned int array field.
  229. */
  230. static inline int ap_test_config(unsigned int *field, unsigned int nr)
  231. {
  232. return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
  233. }
  234. /*
  235. * ap_test_config_card_id(): Test, whether an AP card ID is configured.
  236. *
  237. * Returns 0 if the card is not configured
  238. * 1 if the card is configured or
  239. * if the configuration information is not available
  240. */
  241. static inline int ap_test_config_card_id(unsigned int id)
  242. {
  243. if (id > ap_max_adapter_id)
  244. return 0;
  245. if (ap_qci_info->flags)
  246. return ap_test_config(ap_qci_info->apm, id);
  247. return 1;
  248. }
  249. /*
  250. * ap_test_config_usage_domain(): Test, whether an AP usage domain
  251. * is configured.
  252. *
  253. * Returns 0 if the usage domain is not configured
  254. * 1 if the usage domain is configured or
  255. * if the configuration information is not available
  256. */
  257. int ap_test_config_usage_domain(unsigned int domain)
  258. {
  259. if (domain > ap_max_domain_id)
  260. return 0;
  261. if (ap_qci_info->flags)
  262. return ap_test_config(ap_qci_info->aqm, domain);
  263. return 1;
  264. }
  265. EXPORT_SYMBOL(ap_test_config_usage_domain);
  266. /*
  267. * ap_test_config_ctrl_domain(): Test, whether an AP control domain
  268. * is configured.
  269. * @domain AP control domain ID
  270. *
  271. * Returns 1 if the control domain is configured
  272. * 0 in all other cases
  273. */
  274. int ap_test_config_ctrl_domain(unsigned int domain)
  275. {
  276. if (!ap_qci_info || domain > ap_max_domain_id)
  277. return 0;
  278. return ap_test_config(ap_qci_info->adm, domain);
  279. }
  280. EXPORT_SYMBOL(ap_test_config_ctrl_domain);
/*
 * ap_queue_info(): Check and get AP queue info.
 * Returns: 1 if APQN exists and info is filled,
 *	    0 if APQN seems to exist but there is no info
 *	      available (eg. caused by an asynch pending error)
 *	   -1 invalid APQN, TAPQ error or AP queue status which
 *	      indicates there is no APQN.
 */
static int ap_queue_info(ap_qid_t qid, struct ap_tapq_hwinfo *hwinfo,
			 bool *decfg, bool *cstop)
{
	struct ap_queue_status status;

	hwinfo->value = 0;

	/* make sure we don't run into a specification exception */
	if (AP_QID_CARD(qid) > ap_max_adapter_id ||
	    AP_QID_QUEUE(qid) > ap_max_domain_id)
		return -1;

	/* call TAPQ on this APQN */
	status = ap_test_queue(qid, ap_apft_available(), hwinfo);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_BUSY:
		/* For all these RCs the tapq info should be available */
		break;
	default:
		/* On a pending async error the info should be available */
		if (!status.async)
			return -1;
		break;
	}

	/* There should be at least one of the mode bits set */
	if (WARN_ON_ONCE(!hwinfo->value))
		return 0;

	/* translate the two "queue not usable" RCs into the out flags */
	*decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
	*cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;

	return 1;
}
/*
 * ap_wait(): Arrange for the next poll of the AP queues, depending
 * on the wait hint returned by the state machine.
 *
 * AP_SM_WAIT_AGAIN / AP_SM_WAIT_INTERRUPT: with interrupts enabled
 * nothing to do (the interrupt will fire); with a poll kthread just
 * wake it; otherwise fall through to timer-based polling.
 * AP_SM_WAIT_LOW/HIGH_TIMEOUT: (re)arm the high resolution poll timer
 * with the low or high frequency timeout, but only if it is not
 * already queued.
 */
void ap_wait(enum ap_sm_wait wait)
{
	ktime_t hr_time;

	switch (wait) {
	case AP_SM_WAIT_AGAIN:
	case AP_SM_WAIT_INTERRUPT:
		if (ap_irq_flag)
			break;
		if (ap_poll_kthread) {
			wake_up(&ap_poll_wait);
			break;
		}
		fallthrough;
	case AP_SM_WAIT_LOW_TIMEOUT:
	case AP_SM_WAIT_HIGH_TIMEOUT:
		/* ap_poll_timer_lock serializes timer (re)arming */
		spin_lock_bh(&ap_poll_timer_lock);
		if (!hrtimer_is_queued(&ap_poll_timer)) {
			hr_time =
				wait == AP_SM_WAIT_LOW_TIMEOUT ?
				poll_low_timeout : poll_high_timeout;
			hrtimer_forward_now(&ap_poll_timer, hr_time);
			hrtimer_restart(&ap_poll_timer);
		}
		spin_unlock_bh(&ap_poll_timer_lock);
		break;
	case AP_SM_WAIT_NONE:
	default:
		break;
	}
}
  351. /**
  352. * ap_request_timeout(): Handling of request timeouts
  353. * @t: timer making this callback
  354. *
  355. * Handles request timeouts.
  356. */
  357. void ap_request_timeout(struct timer_list *t)
  358. {
  359. struct ap_queue *aq = from_timer(aq, t, timeout);
  360. spin_lock_bh(&aq->lock);
  361. ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
  362. spin_unlock_bh(&aq->lock);
  363. }
  364. /**
  365. * ap_poll_timeout(): AP receive polling for finished AP requests.
  366. * @unused: Unused pointer.
  367. *
  368. * Schedules the AP tasklet using a high resolution timer.
  369. */
  370. static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
  371. {
  372. tasklet_schedule(&ap_tasklet);
  373. return HRTIMER_NORESTART;
  374. }
  375. /**
  376. * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
  377. * @airq: pointer to adapter interrupt descriptor
  378. * @tpi_info: ignored
  379. */
  380. static void ap_interrupt_handler(struct airq_struct *airq,
  381. struct tpi_info *tpi_info)
  382. {
  383. inc_irq_stat(IRQIO_APB);
  384. tasklet_schedule(&ap_tasklet);
  385. }
/**
 * ap_tasklet_fn(): Tasklet to poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus.
 */
static void ap_tasklet_fn(unsigned long dummy)
{
	int bkt;
	struct ap_queue *aq;
	enum ap_sm_wait wait = AP_SM_WAIT_NONE;

	/* Reset the indicator if interrupts are used. Thus new interrupts can
	 * be received. Doing it in the beginning of the tasklet is therefore
	 * important that no requests on any AP get lost.
	 */
	if (ap_irq_flag)
		xchg(ap_airq.lsi_ptr, 0);

	/* poll every queue; keep the most urgent wait hint (min enum value) */
	spin_lock_bh(&ap_queues_lock);
	hash_for_each(ap_queues, bkt, aq, hnode) {
		spin_lock_bh(&aq->lock);
		wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
	}
	spin_unlock_bh(&ap_queues_lock);

	ap_wait(wait);
}
  412. static int ap_pending_requests(void)
  413. {
  414. int bkt;
  415. struct ap_queue *aq;
  416. spin_lock_bh(&ap_queues_lock);
  417. hash_for_each(ap_queues, bkt, aq, hnode) {
  418. if (aq->queue_count == 0)
  419. continue;
  420. spin_unlock_bh(&ap_queues_lock);
  421. return 1;
  422. }
  423. spin_unlock_bh(&ap_queues_lock);
  424. return 0;
  425. }
/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);

	set_user_nice(current, MAX_NICE);
	set_freezable();
	while (!kthread_should_stop()) {
		/* enqueue on the waitqueue BEFORE checking pending requests
		 * so a wake_up between check and schedule is not lost */
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!ap_pending_requests()) {
			schedule();
			try_to_freeze();
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);
		/* yield whenever someone else wants the cpu */
		if (need_resched()) {
			schedule();
			try_to_freeze();
			continue;
		}
		ap_tasklet_fn(0);
	}

	return 0;
}
  459. static int ap_poll_thread_start(void)
  460. {
  461. int rc;
  462. if (ap_irq_flag || ap_poll_kthread)
  463. return 0;
  464. mutex_lock(&ap_poll_thread_mutex);
  465. ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
  466. rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
  467. if (rc)
  468. ap_poll_kthread = NULL;
  469. mutex_unlock(&ap_poll_thread_mutex);
  470. return rc;
  471. }
  472. static void ap_poll_thread_stop(void)
  473. {
  474. if (!ap_poll_kthread)
  475. return;
  476. mutex_lock(&ap_poll_thread_mutex);
  477. kthread_stop(ap_poll_kthread);
  478. ap_poll_kthread = NULL;
  479. mutex_unlock(&ap_poll_thread_mutex);
  480. }
  481. #define is_card_dev(x) ((x)->parent == ap_root_device)
  482. #define is_queue_dev(x) ((x)->parent != ap_root_device)
  483. /**
  484. * ap_bus_match()
  485. * @dev: Pointer to device
  486. * @drv: Pointer to device_driver
  487. *
  488. * AP bus driver registration/unregistration.
  489. */
  490. static int ap_bus_match(struct device *dev, const struct device_driver *drv)
  491. {
  492. const struct ap_driver *ap_drv = to_ap_drv(drv);
  493. struct ap_device_id *id;
  494. /*
  495. * Compare device type of the device with the list of
  496. * supported types of the device_driver.
  497. */
  498. for (id = ap_drv->ids; id->match_flags; id++) {
  499. if (is_card_dev(dev) &&
  500. id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
  501. id->dev_type == to_ap_dev(dev)->device_type)
  502. return 1;
  503. if (is_queue_dev(dev) &&
  504. id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
  505. id->dev_type == to_ap_dev(dev)->device_type)
  506. return 1;
  507. }
  508. return 0;
  509. }
/**
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * It sets up a single environment variable DEV_TYPE which contains the
 * hardware device type. For card devices additionally MODALIAS and a
 * MODE variable are added; for queue devices only the MODE variable
 * (derived from the owning card's hwinfo) is added.
 */
static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	int rc = 0;
	const struct ap_device *ap_dev = to_ap_dev(dev);

	/* Uevents from ap bus core don't need extensions to the env */
	if (dev == ap_root_device)
		return 0;

	if (is_card_dev(dev)) {
		struct ap_card *ac = to_ap_card(&ap_dev->device);

		/* Set up DEV_TYPE environment variable. */
		rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
		if (rc)
			return rc;
		/* Add MODALIAS= */
		rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
		if (rc)
			return rc;

		/* Add MODE=<accel|cca|ep11> */
		if (ac->hwinfo.accel)
			rc = add_uevent_var(env, "MODE=accel");
		else if (ac->hwinfo.cca)
			rc = add_uevent_var(env, "MODE=cca");
		else if (ac->hwinfo.ep11)
			rc = add_uevent_var(env, "MODE=ep11");
		if (rc)
			return rc;
	} else {
		struct ap_queue *aq = to_ap_queue(&ap_dev->device);

		/* Add MODE=<accel|cca|ep11> */
		if (aq->card->hwinfo.accel)
			rc = add_uevent_var(env, "MODE=accel");
		else if (aq->card->hwinfo.cca)
			rc = add_uevent_var(env, "MODE=cca");
		else if (aq->card->hwinfo.ep11)
			rc = add_uevent_var(env, "MODE=ep11");
		if (rc)
			return rc;
	}

	return 0;
}
  558. static void ap_send_init_scan_done_uevent(void)
  559. {
  560. char *envp[] = { "INITSCAN=done", NULL };
  561. kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
  562. }
  563. static void ap_send_bindings_complete_uevent(void)
  564. {
  565. char buf[32];
  566. char *envp[] = { "BINDINGS=complete", buf, NULL };
  567. snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu",
  568. atomic64_inc_return(&ap_bindings_complete_count));
  569. kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
  570. }
  571. void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg)
  572. {
  573. char buf[16];
  574. char *envp[] = { buf, NULL };
  575. snprintf(buf, sizeof(buf), "CONFIG=%d", cfg ? 1 : 0);
  576. kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
  577. }
  578. EXPORT_SYMBOL(ap_send_config_uevent);
  579. void ap_send_online_uevent(struct ap_device *ap_dev, int online)
  580. {
  581. char buf[16];
  582. char *envp[] = { buf, NULL };
  583. snprintf(buf, sizeof(buf), "ONLINE=%d", online ? 1 : 0);
  584. kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
  585. }
  586. EXPORT_SYMBOL(ap_send_online_uevent);
  587. static void ap_send_mask_changed_uevent(unsigned long *newapm,
  588. unsigned long *newaqm)
  589. {
  590. char buf[100];
  591. char *envp[] = { buf, NULL };
  592. if (newapm)
  593. snprintf(buf, sizeof(buf),
  594. "APMASK=0x%016lx%016lx%016lx%016lx\n",
  595. newapm[0], newapm[1], newapm[2], newapm[3]);
  596. else
  597. snprintf(buf, sizeof(buf),
  598. "AQMASK=0x%016lx%016lx%016lx%016lx\n",
  599. newaqm[0], newaqm[1], newaqm[2], newaqm[3]);
  600. kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
  601. }
  602. /*
  603. * calc # of bound APQNs
  604. */
  605. struct __ap_calc_ctrs {
  606. unsigned int apqns;
  607. unsigned int bound;
  608. };
  609. static int __ap_calc_helper(struct device *dev, void *arg)
  610. {
  611. struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg;
  612. if (is_queue_dev(dev)) {
  613. pctrs->apqns++;
  614. if (dev->driver)
  615. pctrs->bound++;
  616. }
  617. return 0;
  618. }
  619. static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
  620. {
  621. struct __ap_calc_ctrs ctrs;
  622. memset(&ctrs, 0, sizeof(ctrs));
  623. bus_for_each_dev(&ap_bus_type, NULL, (void *)&ctrs, __ap_calc_helper);
  624. *apqns = ctrs.apqns;
  625. *bound = ctrs.bound;
  626. }
  627. /*
  628. * After ap bus scan do check if all existing APQNs are
  629. * bound to device drivers.
  630. */
  631. static void ap_check_bindings_complete(void)
  632. {
  633. unsigned int apqns, bound;
  634. if (atomic64_read(&ap_scan_bus_count) >= 1) {
  635. ap_calc_bound_apqns(&apqns, &bound);
  636. if (bound == apqns) {
  637. if (!completion_done(&ap_apqn_bindings_complete)) {
  638. complete_all(&ap_apqn_bindings_complete);
  639. ap_send_bindings_complete_uevent();
  640. pr_debug("all apqn bindings complete\n");
  641. }
  642. }
  643. }
  644. }
/*
 * Interface to wait for the AP bus to have done one initial ap bus
 * scan and all detected APQNs have been bound to device drivers.
 * If these both conditions are not fulfilled, this function blocks
 * on a condition with wait_for_completion_interruptible_timeout().
 * If these both conditions are fulfilled (before the timeout hits)
 * the return value is 0. If the timeout (in jiffies) hits instead
 * -ETIME is returned. On failures negative return values are
 * returned to the caller.
 */
int ap_wait_apqn_bindings_complete(unsigned long timeout)
{
	int rc = 0;
	long l;

	/* fast path: already complete, no need to wait */
	if (completion_done(&ap_apqn_bindings_complete))
		return 0;

	/* timeout == 0 means wait without a time limit */
	if (timeout)
		l = wait_for_completion_interruptible_timeout(
			&ap_apqn_bindings_complete, timeout);
	else
		l = wait_for_completion_interruptible(
			&ap_apqn_bindings_complete);
	if (l < 0)
		/* map -ERESTARTSYS to -EINTR for the caller */
		rc = l == -ERESTARTSYS ? -EINTR : l;
	else if (l == 0 && timeout)
		/* timed variant returned 0: the timeout elapsed */
		rc = -ETIME;

	pr_debug("rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
  675. static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
  676. {
  677. if (is_queue_dev(dev) &&
  678. AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data)
  679. device_unregister(dev);
  680. return 0;
  681. }
/*
 * Bus iterator callback: re-probe a queue device when its reserved-for-
 * default-drivers state (per ap_perms) no longer matches the DEFAULT
 * flag of the driver it is currently bound to.
 */
static int __ap_revise_reserved(struct device *dev, void *dummy)
{
	int rc, card, queue, devres, drvres;

	if (is_queue_dev(dev)) {
		card = AP_QID_CARD(to_ap_queue(dev)->qid);
		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
		/* devres: APQN is reserved for the host/default drivers */
		mutex_lock(&ap_perms_mutex);
		devres = test_bit_inv(card, ap_perms.apm) &&
			test_bit_inv(queue, ap_perms.aqm);
		mutex_unlock(&ap_perms_mutex);
		/* drvres: currently bound driver carries the DEFAULT flag */
		drvres = to_ap_drv(dev->driver)->flags
			& AP_DRIVER_FLAG_DEFAULT;
		if (!!devres != !!drvres) {
			/* mismatch: have the driver core rebind the queue */
			pr_debug("reprobing queue=%02x.%04x\n", card, queue);
			rc = device_reprobe(dev);
			if (rc)
				AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
					    __func__, card, queue);
		}
	}

	return 0;
}
/* Re-evaluate driver bindings for all queue devices on the AP bus. */
static void ap_bus_revise_bindings(void)
{
	bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
}
  708. /**
  709. * ap_owned_by_def_drv: indicates whether an AP adapter is reserved for the
  710. * default host driver or not.
  711. * @card: the APID of the adapter card to check
  712. * @queue: the APQI of the queue to check
  713. *
  714. * Note: the ap_perms_mutex must be locked by the caller of this function.
  715. *
  716. * Return: an int specifying whether the AP adapter is reserved for the host (1)
  717. * or not (0).
  718. */
  719. int ap_owned_by_def_drv(int card, int queue)
  720. {
  721. int rc = 0;
  722. if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
  723. return -EINVAL;
  724. if (test_bit_inv(card, ap_perms.apm) &&
  725. test_bit_inv(queue, ap_perms.aqm))
  726. rc = 1;
  727. return rc;
  728. }
  729. EXPORT_SYMBOL(ap_owned_by_def_drv);
  730. /**
  731. * ap_apqn_in_matrix_owned_by_def_drv: indicates whether every APQN contained in
  732. * a set is reserved for the host drivers
  733. * or not.
  734. * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check
  735. * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check
  736. *
  737. * Note: the ap_perms_mutex must be locked by the caller of this function.
  738. *
  739. * Return: an int specifying whether each APQN is reserved for the host (1) or
  740. * not (0)
  741. */
  742. int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
  743. unsigned long *aqm)
  744. {
  745. int card, queue, rc = 0;
  746. for (card = 0; !rc && card < AP_DEVICES; card++)
  747. if (test_bit_inv(card, apm) &&
  748. test_bit_inv(card, ap_perms.apm))
  749. for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
  750. if (test_bit_inv(queue, aqm) &&
  751. test_bit_inv(queue, ap_perms.aqm))
  752. rc = 1;
  753. return rc;
  754. }
  755. EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
/*
 * Bus probe callback: bind an AP (card or queue) device to an AP driver.
 * Takes a device reference for the lifetime of the binding; the reference
 * is dropped on every failure path.
 */
static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int card, queue, devres, drvres, rc = -ENODEV;

	if (!get_device(dev))
		return rc;

	if (is_queue_dev(dev)) {
		/*
		 * If the apqn is marked as reserved/used by ap bus and
		 * default drivers, only probe with drivers with the default
		 * flag set. If it is not marked, only probe with drivers
		 * with the default flag not set.
		 */
		card = AP_QID_CARD(to_ap_queue(dev)->qid);
		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
		mutex_lock(&ap_perms_mutex);
		devres = test_bit_inv(card, ap_perms.apm) &&
			test_bit_inv(queue, ap_perms.aqm);
		mutex_unlock(&ap_perms_mutex);
		drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
		if (!!devres != !!drvres)
			goto out;	/* reserved state and driver flag disagree */
	}

	/*
	 * Rearm the bindings complete completion to trigger
	 * bindings complete when all devices are bound again
	 */
	reinit_completion(&ap_apqn_bindings_complete);

	/* Add queue/card to list of active queues/cards */
	spin_lock_bh(&ap_queues_lock);
	if (is_queue_dev(dev))
		hash_add(ap_queues, &to_ap_queue(dev)->hnode,
			 to_ap_queue(dev)->qid);
	spin_unlock_bh(&ap_queues_lock);

	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;

	if (rc) {
		/* probe failed: take the queue out of the hash again */
		spin_lock_bh(&ap_queues_lock);
		if (is_queue_dev(dev))
			hash_del(&to_ap_queue(dev)->hnode);
		spin_unlock_bh(&ap_queues_lock);
	}

out:
	if (rc)
		put_device(dev);
	return rc;
}
/*
 * Bus remove callback: unbind an AP device from its driver.
 * Ordering matters: queue removal is prepared before the driver's
 * remove callback runs, and finalized afterwards. Finally the
 * reference taken in ap_device_probe() is dropped.
 */
static void ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);

	/* prepare ap queue device removal */
	if (is_queue_dev(dev))
		ap_queue_prepare_remove(to_ap_queue(dev));

	/* driver's chance to clean up gracefully */
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);

	/* now do the ap queue device remove */
	if (is_queue_dev(dev))
		ap_queue_remove(to_ap_queue(dev));

	/* Remove queue/card from list of active queues/cards */
	spin_lock_bh(&ap_queues_lock);
	if (is_queue_dev(dev))
		hash_del(&to_ap_queue(dev)->hnode);
	spin_unlock_bh(&ap_queues_lock);

	/* drop the reference taken in ap_device_probe() */
	put_device(dev);
}
  823. struct ap_queue *ap_get_qdev(ap_qid_t qid)
  824. {
  825. int bkt;
  826. struct ap_queue *aq;
  827. spin_lock_bh(&ap_queues_lock);
  828. hash_for_each(ap_queues, bkt, aq, hnode) {
  829. if (aq->qid == qid) {
  830. get_device(&aq->ap_dev.device);
  831. spin_unlock_bh(&ap_queues_lock);
  832. return aq;
  833. }
  834. }
  835. spin_unlock_bh(&ap_queues_lock);
  836. return NULL;
  837. }
  838. EXPORT_SYMBOL(ap_get_qdev);
/*
 * Register an AP device driver on the AP bus and re-evaluate whether
 * all APQNs are now bound. Returns the driver_register() result.
 */
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;
	int rc;

	drv->bus = &ap_bus_type;
	drv->owner = owner;
	drv->name = name;
	rc = driver_register(drv);

	/*
	 * NOTE(review): called even when driver_register() failed -
	 * presumably harmless as it only re-checks binding completeness;
	 * confirm this is intended.
	 */
	ap_check_bindings_complete();

	return rc;
}
EXPORT_SYMBOL(ap_driver_register);
/* Unregister an AP device driver from the AP bus. */
void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
/*
 * Enforce a synchronous AP bus rescan.
 * Returns true if the bus scan finds a change in the AP configuration
 * and AP devices have been added or deleted when this function returns.
 */
bool ap_bus_force_rescan(void)
{
	unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
	bool rc = false;

	pr_debug("> scan counter=%lu\n", scan_counter);

	/* Only trigger AP bus scans after the initial scan is done */
	if (scan_counter <= 0)
		goto out;

	/*
	 * There is one unlikely but nevertheless valid scenario where the
	 * thread holding the mutex may try to send some crypto load but
	 * all cards are offline so a rescan is triggered which causes
	 * a recursive call of ap_bus_force_rescan(). A simple return if
	 * the mutex is already locked by this thread solves this.
	 */
	if (mutex_is_locked(&ap_scan_bus_mutex)) {
		if (ap_scan_bus_task == current)
			goto out;
	}

	/* Try to acquire the AP scan bus mutex */
	if (mutex_trylock(&ap_scan_bus_mutex)) {
		/* mutex acquired, run the AP bus scan */
		ap_scan_bus_task = current;
		ap_scan_bus_result = ap_scan_bus();
		rc = ap_scan_bus_result;
		ap_scan_bus_task = NULL;
		mutex_unlock(&ap_scan_bus_mutex);
		goto out;
	}

	/*
	 * Mutex acquire failed. So there is currently another task
	 * already running the AP bus scan. Then let's simple wait
	 * for the lock which means the other task has finished and
	 * stored the result in ap_scan_bus_result.
	 */
	if (mutex_lock_interruptible(&ap_scan_bus_mutex)) {
		/* some error occurred, ignore and go out */
		goto out;
	}
	rc = ap_scan_bus_result;
	mutex_unlock(&ap_scan_bus_mutex);

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ap_bus_force_rescan);
  908. /*
  909. * A config change has happened, force an ap bus rescan.
  910. */
  911. static int ap_bus_cfg_chg(struct notifier_block *nb,
  912. unsigned long action, void *data)
  913. {
  914. if (action != CHSC_NOTIFY_AP_CFG)
  915. return NOTIFY_DONE;
  916. pr_debug("config change, forcing bus rescan\n");
  917. ap_bus_force_rescan();
  918. return NOTIFY_OK;
  919. }
/* Notifier block hooking ap_bus_cfg_chg() into the CHSC notification chain. */
static struct notifier_block ap_bus_nb = {
	.notifier_call = ap_bus_cfg_chg,
};
  923. int ap_hex2bitmap(const char *str, unsigned long *bitmap, int bits)
  924. {
  925. int i, n, b;
  926. /* bits needs to be a multiple of 8 */
  927. if (bits & 0x07)
  928. return -EINVAL;
  929. if (str[0] == '0' && str[1] == 'x')
  930. str++;
  931. if (*str == 'x')
  932. str++;
  933. for (i = 0; isxdigit(*str) && i < bits; str++) {
  934. b = hex_to_bin(*str);
  935. for (n = 0; n < 4; n++)
  936. if (b & (0x08 >> n))
  937. set_bit_inv(i + n, bitmap);
  938. i += 4;
  939. }
  940. if (*str == '\n')
  941. str++;
  942. if (*str)
  943. return -EINVAL;
  944. return 0;
  945. }
  946. EXPORT_SYMBOL(ap_hex2bitmap);
/*
 * modify_bitmap() - parse bitmask argument and modify an existing
 * bit mask accordingly. A concatenation (done with ',') of these
 * terms is recognized:
 *   +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
 * <bitnr> may be any valid number (hex, decimal or octal) in the range
 * 0...bits-1; the leading + or - is required. Here are some examples:
 *   +0-15,+32,-128,-0xFF
 *   -0-255,+1-16,+0x128
 *   +1,+2,+3,+4,-5,-7-10
 * Returns the new bitmap after all changes have been applied. Every
 * positive value in the string will set a bit and every negative value
 * in the string will clear a bit. As a bit may be touched more than once,
 * the last 'operation' wins:
 * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
 * cleared again. All other bits are unmodified.
 */
static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
{
	unsigned long a, i, z;
	char *np, sign;

	/* bits needs to be a multiple of 8 */
	if (bits & 0x07)
		return -EINVAL;

	while (*str) {
		/* each term starts with a mandatory '+' or '-' */
		sign = *str++;
		if (sign != '+' && sign != '-')
			return -EINVAL;
		/* a = first bit number, z = last bit number (initially same) */
		a = z = simple_strtoul(str, &np, 0);
		if (str == np || a >= bits)
			return -EINVAL;
		str = np;
		/* optional "-<bitnr>" makes the term a range */
		if (*str == '-') {
			z = simple_strtoul(++str, &np, 0);
			if (str == np || a > z || z >= bits)
				return -EINVAL;
			str = np;
		}
		/* apply the term: set or clear bits a..z inclusive */
		for (i = a; i <= z; i++)
			if (sign == '+')
				set_bit_inv(i, bitmap);
			else
				clear_bit_inv(i, bitmap);
		/* skip term separators (',' and newlines) */
		while (*str == ',' || *str == '\n')
			str++;
	}

	return 0;
}
  995. static int ap_parse_bitmap_str(const char *str, unsigned long *bitmap, int bits,
  996. unsigned long *newmap)
  997. {
  998. unsigned long size;
  999. int rc;
  1000. size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
  1001. if (*str == '+' || *str == '-') {
  1002. memcpy(newmap, bitmap, size);
  1003. rc = modify_bitmap(str, newmap, bits);
  1004. } else {
  1005. memset(newmap, 0, size);
  1006. rc = ap_hex2bitmap(str, newmap, bits);
  1007. }
  1008. return rc;
  1009. }
  1010. int ap_parse_mask_str(const char *str,
  1011. unsigned long *bitmap, int bits,
  1012. struct mutex *lock)
  1013. {
  1014. unsigned long *newmap, size;
  1015. int rc;
  1016. /* bits needs to be a multiple of 8 */
  1017. if (bits & 0x07)
  1018. return -EINVAL;
  1019. size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
  1020. newmap = kmalloc(size, GFP_KERNEL);
  1021. if (!newmap)
  1022. return -ENOMEM;
  1023. if (mutex_lock_interruptible(lock)) {
  1024. kfree(newmap);
  1025. return -ERESTARTSYS;
  1026. }
  1027. rc = ap_parse_bitmap_str(str, bitmap, bits, newmap);
  1028. if (rc == 0)
  1029. memcpy(bitmap, newmap, size);
  1030. mutex_unlock(lock);
  1031. kfree(newmap);
  1032. return rc;
  1033. }
  1034. EXPORT_SYMBOL(ap_parse_mask_str);
/*
 * AP bus attributes.
 */

/* Show the current default AP domain index. */
static ssize_t ap_domain_show(const struct bus_type *bus, char *buf)
{
	return sysfs_emit(buf, "%d\n", ap_domain_index);
}

/*
 * Store a new default AP domain index. The value must be in range,
 * and the domain must be usable per the aqm permission mask.
 */
static ssize_t ap_domain_store(const struct bus_type *bus,
			       const char *buf, size_t count)
{
	int domain;

	if (sscanf(buf, "%i\n", &domain) != 1 ||
	    domain < 0 || domain > ap_max_domain_id ||
	    !test_bit_inv(domain, ap_perms.aqm))
		return -EINVAL;

	spin_lock_bh(&ap_domain_lock);
	ap_domain_index = domain;
	spin_unlock_bh(&ap_domain_lock);

	AP_DBF_INFO("%s stored new default domain=%d\n",
		    __func__, domain);

	return count;
}

static BUS_ATTR_RW(ap_domain);
/* Show the QCI control domain mask (adm), or "not supported" without QCI. */
static ssize_t ap_control_domain_mask_show(const struct bus_type *bus, char *buf)
{
	if (!ap_qci_info->flags)	/* QCI not supported */
		return sysfs_emit(buf, "not supported\n");

	return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
			  ap_qci_info->adm[0], ap_qci_info->adm[1],
			  ap_qci_info->adm[2], ap_qci_info->adm[3],
			  ap_qci_info->adm[4], ap_qci_info->adm[5],
			  ap_qci_info->adm[6], ap_qci_info->adm[7]);
}

static BUS_ATTR_RO(ap_control_domain_mask);
/* Show the QCI usage domain mask (aqm), or "not supported" without QCI. */
static ssize_t ap_usage_domain_mask_show(const struct bus_type *bus, char *buf)
{
	if (!ap_qci_info->flags)	/* QCI not supported */
		return sysfs_emit(buf, "not supported\n");

	return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
			  ap_qci_info->aqm[0], ap_qci_info->aqm[1],
			  ap_qci_info->aqm[2], ap_qci_info->aqm[3],
			  ap_qci_info->aqm[4], ap_qci_info->aqm[5],
			  ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
}

static BUS_ATTR_RO(ap_usage_domain_mask);
/* Show the QCI adapter mask (apm), or "not supported" without QCI. */
static ssize_t ap_adapter_mask_show(const struct bus_type *bus, char *buf)
{
	if (!ap_qci_info->flags)	/* QCI not supported */
		return sysfs_emit(buf, "not supported\n");

	return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
			  ap_qci_info->apm[0], ap_qci_info->apm[1],
			  ap_qci_info->apm[2], ap_qci_info->apm[3],
			  ap_qci_info->apm[4], ap_qci_info->apm[5],
			  ap_qci_info->apm[6], ap_qci_info->apm[7]);
}

static BUS_ATTR_RO(ap_adapter_mask);
/* Show whether AP interrupt support is in use (1) or not (0). */
static ssize_t ap_interrupts_show(const struct bus_type *bus, char *buf)
{
	return sysfs_emit(buf, "%d\n", ap_irq_flag ? 1 : 0);
}

static BUS_ATTR_RO(ap_interrupts);
/* Show the AP bus scan interval in seconds. */
static ssize_t config_time_show(const struct bus_type *bus, char *buf)
{
	return sysfs_emit(buf, "%d\n", ap_scan_bus_time);
}

/* Store a new scan interval (5..120 s) and rearm the scan timer. */
static ssize_t config_time_store(const struct bus_type *bus,
				 const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_scan_bus_time = time;
	mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
	return count;
}

static BUS_ATTR_RW(config_time);
  1111. static ssize_t poll_thread_show(const struct bus_type *bus, char *buf)
  1112. {
  1113. return sysfs_emit(buf, "%d\n", ap_poll_kthread ? 1 : 0);
  1114. }
  1115. static ssize_t poll_thread_store(const struct bus_type *bus,
  1116. const char *buf, size_t count)
  1117. {
  1118. bool value;
  1119. int rc;
  1120. rc = kstrtobool(buf, &value);
  1121. if (rc)
  1122. return rc;
  1123. if (value) {
  1124. rc = ap_poll_thread_start();
  1125. if (rc)
  1126. count = rc;
  1127. } else {
  1128. ap_poll_thread_stop();
  1129. }
  1130. return count;
  1131. }
  1132. static BUS_ATTR_RW(poll_thread);
/* Show the high-resolution poll timeout in nanoseconds. */
static ssize_t poll_timeout_show(const struct bus_type *bus, char *buf)
{
	return sysfs_emit(buf, "%lu\n", poll_high_timeout);
}

/*
 * Store a new poll timeout (nanoseconds, max 120 s) and reprogram
 * the high-resolution poll timer accordingly.
 */
static ssize_t poll_timeout_store(const struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long value;
	ktime_t hr_time;
	int rc;

	rc = kstrtoul(buf, 0, &value);
	if (rc)
		return rc;

	/* 120 seconds = maximum poll interval */
	if (value > 120000000000UL)
		return -EINVAL;
	poll_high_timeout = value;
	hr_time = poll_high_timeout;

	/* cancel and restart the timer with the new expiry */
	spin_lock_bh(&ap_poll_timer_lock);
	hrtimer_cancel(&ap_poll_timer);
	hrtimer_set_expires(&ap_poll_timer, hr_time);
	hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	spin_unlock_bh(&ap_poll_timer_lock);

	return count;
}

static BUS_ATTR_RW(poll_timeout);
/* Show the highest supported AP domain id. */
static ssize_t ap_max_domain_id_show(const struct bus_type *bus, char *buf)
{
	return sysfs_emit(buf, "%d\n", ap_max_domain_id);
}

static BUS_ATTR_RO(ap_max_domain_id);
/* Show the highest supported AP adapter id. */
static ssize_t ap_max_adapter_id_show(const struct bus_type *bus, char *buf)
{
	return sysfs_emit(buf, "%d\n", ap_max_adapter_id);
}

static BUS_ATTR_RO(ap_max_adapter_id);
/* Show the adapter permission mask (apm) under the ap_perms_mutex. */
static ssize_t apmask_show(const struct bus_type *bus, char *buf)
{
	int rc;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;
	rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
			ap_perms.apm[0], ap_perms.apm[1],
			ap_perms.apm[2], ap_perms.apm[3]);
	mutex_unlock(&ap_perms_mutex);

	return rc;
}
/*
 * Driver iterator callback: ask a non-default driver (via its in_use
 * callback) whether any of the adapters about to be reserved are busy.
 * Returns -EBUSY when in use, 0 otherwise.
 */
static int __verify_card_reservations(struct device_driver *drv, void *data)
{
	int rc = 0;
	struct ap_driver *ap_drv = to_ap_drv(drv);
	unsigned long *newapm = (unsigned long *)data;

	/*
	 * increase the driver's module refcounter to be sure it is not
	 * going away when we invoke the callback function.
	 */
	if (!try_module_get(drv->owner))
		return 0;

	if (ap_drv->in_use) {
		rc = ap_drv->in_use(newapm, ap_perms.aqm);
		if (rc)
			rc = -EBUSY;
	}

	/* release the driver's module */
	module_put(drv->owner);

	return rc;
}
/*
 * Commit a new adapter permission mask: first verify with all drivers
 * that the newly reserved adapters are not in use, then copy the mask
 * into ap_perms. Caller holds ap_perms_mutex.
 */
static int apmask_commit(unsigned long *newapm)
{
	int rc;
	unsigned long reserved[BITS_TO_LONGS(AP_DEVICES)];

	/*
	 * Check if any bits in the apmask have been set which will
	 * result in queues being removed from non-default drivers
	 */
	if (bitmap_andnot(reserved, newapm, ap_perms.apm, AP_DEVICES)) {
		rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
				      __verify_card_reservations);
		if (rc)
			return rc;
	}

	memcpy(ap_perms.apm, newapm, APMASKSIZE);

	return 0;
}
/*
 * Store a new adapter permission mask (absolute hex or relative +/-
 * syntax). On change, commit the mask, revise driver bindings and
 * emit a mask-changed uevent.
 */
static ssize_t apmask_store(const struct bus_type *bus, const char *buf,
			    size_t count)
{
	int rc, changes = 0;
	DECLARE_BITMAP(newapm, AP_DEVICES);

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm);
	if (rc)
		goto done;

	/* only commit when the mask actually differs */
	changes = memcmp(ap_perms.apm, newapm, APMASKSIZE);
	if (changes)
		rc = apmask_commit(newapm);

done:
	mutex_unlock(&ap_perms_mutex);
	if (rc)
		return rc;

	if (changes) {
		ap_bus_revise_bindings();
		ap_send_mask_changed_uevent(newapm, NULL);
	}

	return count;
}

static BUS_ATTR_RW(apmask);
/* Show the queue (domain) permission mask (aqm) under the ap_perms_mutex. */
static ssize_t aqmask_show(const struct bus_type *bus, char *buf)
{
	int rc;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;
	rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
			ap_perms.aqm[0], ap_perms.aqm[1],
			ap_perms.aqm[2], ap_perms.aqm[3]);
	mutex_unlock(&ap_perms_mutex);

	return rc;
}
/*
 * Driver iterator callback: ask a non-default driver (via its in_use
 * callback) whether any of the domains about to be reserved are busy.
 * Returns -EBUSY when in use, 0 otherwise.
 */
static int __verify_queue_reservations(struct device_driver *drv, void *data)
{
	int rc = 0;
	struct ap_driver *ap_drv = to_ap_drv(drv);
	unsigned long *newaqm = (unsigned long *)data;

	/*
	 * increase the driver's module refcounter to be sure it is not
	 * going away when we invoke the callback function.
	 */
	if (!try_module_get(drv->owner))
		return 0;

	if (ap_drv->in_use) {
		rc = ap_drv->in_use(ap_perms.apm, newaqm);
		if (rc)
			rc = -EBUSY;
	}

	/* release the driver's module */
	module_put(drv->owner);

	return rc;
}
/*
 * Commit a new queue (domain) permission mask: first verify with all
 * drivers that the newly reserved domains are not in use, then copy
 * the mask into ap_perms. Caller holds ap_perms_mutex.
 */
static int aqmask_commit(unsigned long *newaqm)
{
	int rc;
	unsigned long reserved[BITS_TO_LONGS(AP_DOMAINS)];

	/*
	 * Check if any bits in the aqmask have been set which will
	 * result in queues being removed from non-default drivers
	 */
	if (bitmap_andnot(reserved, newaqm, ap_perms.aqm, AP_DOMAINS)) {
		rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
				      __verify_queue_reservations);
		if (rc)
			return rc;
	}

	memcpy(ap_perms.aqm, newaqm, AQMASKSIZE);

	return 0;
}
  1289. static ssize_t aqmask_store(const struct bus_type *bus, const char *buf,
  1290. size_t count)
  1291. {
  1292. int rc, changes = 0;
  1293. DECLARE_BITMAP(newaqm, AP_DOMAINS);
  1294. if (mutex_lock_interruptible(&ap_perms_mutex))
  1295. return -ERESTARTSYS;
  1296. rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm);
  1297. if (rc)
  1298. goto done;
  1299. changes = memcmp(ap_perms.aqm, newaqm, APMASKSIZE);
  1300. if (changes)
  1301. rc = aqmask_commit(newaqm);
  1302. done:
  1303. mutex_unlock(&ap_perms_mutex);
  1304. if (rc)
  1305. return rc;
  1306. if (changes) {
  1307. ap_bus_revise_bindings();
  1308. ap_send_mask_changed_uevent(NULL, newaqm);
  1309. }
  1310. return count;
  1311. }
  1312. static BUS_ATTR_RW(aqmask);
/* Show the number of completed AP bus scans. */
static ssize_t scans_show(const struct bus_type *bus, char *buf)
{
	return sysfs_emit(buf, "%llu\n", atomic64_read(&ap_scan_bus_count));
}

/* Any write triggers a forced synchronous AP bus rescan. */
static ssize_t scans_store(const struct bus_type *bus, const char *buf,
			   size_t count)
{
	AP_DBF_INFO("%s force AP bus rescan\n", __func__);

	ap_bus_force_rescan();

	return count;
}

static BUS_ATTR_RW(scans);
  1325. static ssize_t bindings_show(const struct bus_type *bus, char *buf)
  1326. {
  1327. int rc;
  1328. unsigned int apqns, n;
  1329. ap_calc_bound_apqns(&apqns, &n);
  1330. if (atomic64_read(&ap_scan_bus_count) >= 1 && n == apqns)
  1331. rc = sysfs_emit(buf, "%u/%u (complete)\n", n, apqns);
  1332. else
  1333. rc = sysfs_emit(buf, "%u/%u\n", n, apqns);
  1334. return rc;
  1335. }
  1336. static BUS_ATTR_RO(bindings);
/*
 * Show the QCI feature flags as a space separated keyword list,
 * or "-" when QCI is not supported.
 */
static ssize_t features_show(const struct bus_type *bus, char *buf)
{
	int n = 0;

	if (!ap_qci_info->flags)	/* QCI not supported */
		return sysfs_emit(buf, "-\n");

	if (ap_qci_info->apsc)
		n += sysfs_emit_at(buf, n, "APSC ");
	if (ap_qci_info->apxa)
		n += sysfs_emit_at(buf, n, "APXA ");
	if (ap_qci_info->qact)
		n += sysfs_emit_at(buf, n, "QACT ");
	if (ap_qci_info->rc8a)
		n += sysfs_emit_at(buf, n, "RC8A ");
	if (ap_qci_info->apsb)
		n += sysfs_emit_at(buf, n, "APSB ");

	/* overwrite the trailing space (if any) with the newline */
	sysfs_emit_at(buf, n == 0 ? 0 : n - 1, "\n");

	return n;
}

static BUS_ATTR_RO(features);
/* All AP bus level sysfs attributes, exposed as the bus attribute group. */
static struct attribute *ap_bus_attrs[] = {
	&bus_attr_ap_domain.attr,
	&bus_attr_ap_control_domain_mask.attr,
	&bus_attr_ap_usage_domain_mask.attr,
	&bus_attr_ap_adapter_mask.attr,
	&bus_attr_config_time.attr,
	&bus_attr_poll_thread.attr,
	&bus_attr_ap_interrupts.attr,
	&bus_attr_poll_timeout.attr,
	&bus_attr_ap_max_domain_id.attr,
	&bus_attr_ap_max_adapter_id.attr,
	&bus_attr_apmask.attr,
	&bus_attr_aqmask.attr,
	&bus_attr_scans.attr,
	&bus_attr_bindings.attr,
	&bus_attr_features.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ap_bus);
/* The AP bus type with its match/uevent/probe/remove callbacks. */
static const struct bus_type ap_bus_type = {
	.name = "ap",
	.bus_groups = ap_bus_groups,
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.probe = ap_device_probe,
	.remove = ap_device_remove,
};
/**
 * ap_select_domain(): Select an AP domain if possible and we haven't
 *	already done so before.
 */
static void ap_select_domain(void)
{
	struct ap_queue_status status;
	int card, dom;

	/*
	 * Choose the default domain. Either the one specified with
	 * the "domain=" parameter or the first domain with at least
	 * one valid APQN.
	 */
	spin_lock_bh(&ap_domain_lock);
	if (ap_domain_index >= 0) {
		/* Domain has already been selected. */
		goto out;
	}
	for (dom = 0; dom <= ap_max_domain_id; dom++) {
		if (!ap_test_config_usage_domain(dom) ||
		    !test_bit_inv(dom, ap_perms.aqm))
			continue;
		for (card = 0; card <= ap_max_adapter_id; card++) {
			if (!ap_test_config_card_id(card) ||
			    !test_bit_inv(card, ap_perms.apm))
				continue;
			status = ap_test_queue(AP_MKQID(card, dom),
					       ap_apft_available(),
					       NULL);
			/* break out with a responding APQN found */
			if (status.response_code == AP_RESPONSE_NORMAL)
				break;
		}
		/* card <= max means the inner loop broke out early */
		if (card <= ap_max_adapter_id)
			break;
	}
	/* dom <= max means a usable domain was found above */
	if (dom <= ap_max_domain_id) {
		ap_domain_index = dom;
		AP_DBF_INFO("%s new default domain is %d\n",
			    __func__, ap_domain_index);
	}
out:
	spin_unlock_bh(&ap_domain_lock);
}
/*
 * This function checks the type and returns either 0 for not
 * supported or the highest compatible type value (which may
 * include the input type value).
 */
static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
{
	int comp_type = 0;

	/* < CEX4 is not supported */
	if (rawtype < AP_DEVICE_TYPE_CEX4) {
		AP_DBF_WARN("%s queue=%02x.%04x unsupported type %d\n",
			    __func__, AP_QID_CARD(qid),
			    AP_QID_QUEUE(qid), rawtype);
		return 0;
	}
	/* up to CEX8 known and fully supported */
	if (rawtype <= AP_DEVICE_TYPE_CEX8)
		return rawtype;
	/*
	 * unknown new type > CEX8, check for compatibility
	 * to the highest known and supported type which is
	 * currently CEX8 with the help of the QACT function.
	 */
	if (ap_qact_available()) {
		struct ap_queue_status status;
		union ap_qact_ap_info apinfo = {0};

		/* query compatibility against CEX8 in the given mode */
		apinfo.mode = (func >> 26) & 0x07;
		apinfo.cat = AP_DEVICE_TYPE_CEX8;
		status = ap_qact(qid, 0, &apinfo);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    apinfo.cat >= AP_DEVICE_TYPE_CEX4 &&
		    apinfo.cat <= AP_DEVICE_TYPE_CEX8)
			comp_type = apinfo.cat;
	}
	if (!comp_type)
		AP_DBF_WARN("%s queue=%02x.%04x unable to map type %d\n",
			    __func__, AP_QID_CARD(qid),
			    AP_QID_QUEUE(qid), rawtype);
	else if (comp_type != rawtype)
		AP_DBF_INFO("%s queue=%02x.%04x map type %d to %d\n",
			    __func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			    rawtype, comp_type);
	return comp_type;
}
/*
 * Helper function to be used with bus_find_dev
 * matches for the card device with the given id
 */
static int __match_card_device_with_id(struct device *dev, const void *data)
{
	return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *)data;
}
/*
 * Helper function to be used with bus_find_dev
 * matches for the queue device with a given qid
 */
static int __match_queue_device_with_qid(struct device *dev, const void *data)
{
	return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data;
}
/*
 * Helper function to be used with bus_find_dev
 * matches any queue device with given queue id
 */
static int __match_queue_device_with_queue_id(struct device *dev, const void *data)
{
	return is_queue_dev(dev) &&
		AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data;
}
/* Helper function for notify_config_changed */
static int __drv_notify_config_changed(struct device_driver *drv, void *data)
{
	struct ap_driver *ap_drv = to_ap_drv(drv);

	/* pin the module so the callback cannot vanish underneath us */
	if (try_module_get(drv->owner)) {
		if (ap_drv->on_config_changed)
			ap_drv->on_config_changed(ap_qci_info, ap_qci_info_old);
		module_put(drv->owner);
	}

	return 0;
}
/* Notify all drivers about an qci config change */
static inline void notify_config_changed(void)
{
	bus_for_each_drv(&ap_bus_type, NULL, NULL,
			 __drv_notify_config_changed);
}
/* Helper function for notify_scan_complete */
static int __drv_notify_scan_complete(struct device_driver *drv, void *data)
{
	struct ap_driver *ap_drv = to_ap_drv(drv);

	/* pin the module so the callback cannot vanish underneath us */
	if (try_module_get(drv->owner)) {
		if (ap_drv->on_scan_complete)
			ap_drv->on_scan_complete(ap_qci_info,
						 ap_qci_info_old);
		module_put(drv->owner);
	}

	return 0;
}
/* Notify all drivers about bus scan complete */
static inline void notify_scan_complete(void)
{
	bus_for_each_drv(&ap_bus_type, NULL, NULL,
			 __drv_notify_scan_complete);
}
/*
 * Helper function for ap_scan_bus().
 * Remove card device and associated queue devices.
 */
static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
{
	/* unregister all queue devices of this card first */
	bus_for_each_dev(&ap_bus_type, NULL,
			 (void *)(long)ac->id,
			 __ap_queue_devices_with_id_unregister);
	/* then the card device itself */
	device_unregister(&ac->ap_dev.device);
}
/*
 * Helper function for ap_scan_bus().
 * Does the scan bus job for all the domains within
 * a valid adapter given by an ap_card ptr.
 */
static inline void ap_scan_domains(struct ap_card *ac)
{
	struct ap_tapq_hwinfo hwinfo;
	bool decfg, chkstop;
	struct ap_queue *aq;
	struct device *dev;
	ap_qid_t qid;
	int rc, dom;

	/*
	 * Go through the configuration for the domains and compare them
	 * to the existing queue devices. Also take care of the config
	 * and error state for the queue devices.
	 */
	for (dom = 0; dom <= ap_max_domain_id; dom++) {
		qid = AP_MKQID(ac->id, dom);
		/*
		 * bus_find_device() returns a referenced device (or NULL);
		 * the reference is dropped at put_dev_and_continue below.
		 */
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(long)qid,
				      __match_queue_device_with_qid);
		aq = dev ? to_ap_queue(dev) : NULL;
		if (!ap_test_config_usage_domain(dom)) {
			/* domain vanished from the config: remove its queue dev */
			if (dev) {
				AP_DBF_INFO("%s(%d,%d) not in config anymore, rm queue dev\n",
					    __func__, ac->id, dom);
				device_unregister(dev);
			}
			goto put_dev_and_continue;
		}
		/* domain is valid, get info from this APQN */
		rc = ap_queue_info(qid, &hwinfo, &decfg, &chkstop);
		switch (rc) {
		case -1:
			/*
			 * NOTE(review): rc -1 is treated as "APQN unusable"
			 * (remove an existing queue dev), rc 0 as "no info,
			 * skip silently" - confirm against ap_queue_info().
			 */
			if (dev) {
				AP_DBF_INFO("%s(%d,%d) queue_info() failed, rm queue dev\n",
					    __func__, ac->id, dom);
				device_unregister(dev);
			}
			fallthrough;
		case 0:
			goto put_dev_and_continue;
		default:
			break;
		}
		/* if no queue device exists, create a new one */
		if (!aq) {
			aq = ap_queue_create(qid, ac);
			if (!aq) {
				AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
					    __func__, ac->id, dom);
				continue;
			}
			aq->config = !decfg;
			aq->chkstop = chkstop;
			aq->se_bstate = hwinfo.bs;
			dev = &aq->ap_dev.device;
			dev->bus = &ap_bus_type;
			dev->parent = &ac->ap_dev.device;
			dev_set_name(dev, "%02x.%04x", ac->id, dom);
			/* register queue device */
			rc = device_register(dev);
			if (rc) {
				AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
					    __func__, ac->id, dom);
				goto put_dev_and_continue;
			}
			/*
			 * get it and thus adjust reference counter
			 * (balanced by put_device() at put_dev_and_continue)
			 */
			get_device(dev);
			if (decfg) {
				AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
					    __func__, ac->id, dom);
			} else if (chkstop) {
				AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
					    __func__, ac->id, dom);
			} else {
				/* nudge the queue's state machine */
				ap_queue_init_state(aq);
				AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
					    __func__, ac->id, dom);
			}
			goto put_dev_and_continue;
		}
		/* handle state changes on already existing queue device */
		spin_lock_bh(&aq->lock);
		/* SE bind state */
		aq->se_bstate = hwinfo.bs;
		/* checkstop state */
		if (chkstop && !aq->chkstop) {
			/* checkstop on */
			aq->chkstop = true;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
				aq->dev_state = AP_DEV_STATE_ERROR;
				aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
			}
			spin_unlock_bh(&aq->lock);
			pr_debug("(%d,%d) queue dev checkstop on\n",
				 ac->id, dom);
			/* 'receive' pending messages with -EAGAIN */
			ap_flush_queue(aq);
			goto put_dev_and_continue;
		} else if (!chkstop && aq->chkstop) {
			/* checkstop off */
			aq->chkstop = false;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
				_ap_queue_init_state(aq);
			spin_unlock_bh(&aq->lock);
			pr_debug("(%d,%d) queue dev checkstop off\n",
				 ac->id, dom);
			goto put_dev_and_continue;
		}
		/* config state change */
		if (decfg && aq->config) {
			/* config off this queue device */
			aq->config = false;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
				aq->dev_state = AP_DEV_STATE_ERROR;
				aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
			}
			spin_unlock_bh(&aq->lock);
			pr_debug("(%d,%d) queue dev config off\n",
				 ac->id, dom);
			ap_send_config_uevent(&aq->ap_dev, aq->config);
			/* 'receive' pending messages with -EAGAIN */
			ap_flush_queue(aq);
			goto put_dev_and_continue;
		} else if (!decfg && !aq->config) {
			/* config on this queue device */
			aq->config = true;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
				_ap_queue_init_state(aq);
			spin_unlock_bh(&aq->lock);
			pr_debug("(%d,%d) queue dev config on\n",
				 ac->id, dom);
			ap_send_config_uevent(&aq->ap_dev, aq->config);
			goto put_dev_and_continue;
		}
		/* handle other error states */
		if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
			spin_unlock_bh(&aq->lock);
			/* 'receive' pending messages with -EAGAIN */
			ap_flush_queue(aq);
			/* re-init (with reset) the queue device */
			ap_queue_init_state(aq);
			AP_DBF_INFO("%s(%d,%d) queue dev reinit enforced\n",
				    __func__, ac->id, dom);
			goto put_dev_and_continue;
		}
		spin_unlock_bh(&aq->lock);
put_dev_and_continue:
		/* drop the bus_find_device()/get_device() reference (NULL safe) */
		put_device(dev);
	}
}
/*
 * Helper function for ap_scan_bus().
 * Does the scan bus job for the given adapter id:
 * removes the card (and its queues) if it left the configuration,
 * re-creates it on hwtype/function changes, tracks checkstop and
 * config state, and finally delegates to ap_scan_domains().
 */
static inline void ap_scan_adapter(int ap)
{
	struct ap_tapq_hwinfo hwinfo;
	int rc, dom, comp_type;
	bool decfg, chkstop;
	struct ap_card *ac;
	struct device *dev;
	ap_qid_t qid;

	/* Is there currently a card device for this adapter ? */
	dev = bus_find_device(&ap_bus_type, NULL,
			      (void *)(long)ap,
			      __match_card_device_with_id);
	ac = dev ? to_ap_card(dev) : NULL;

	/* Adapter not in configuration ? */
	if (!ap_test_config_card_id(ap)) {
		if (ac) {
			AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devs\n",
				    __func__, ap);
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
		}
		return;
	}

	/*
	 * Adapter ap is valid in the current configuration. So do some checks:
	 * If no card device exists, build one. If a card device exists, check
	 * for type and functions changed. For all this we need to find a valid
	 * APQN first.
	 */
	for (dom = 0; dom <= ap_max_domain_id; dom++)
		if (ap_test_config_usage_domain(dom)) {
			qid = AP_MKQID(ap, dom);
			if (ap_queue_info(qid, &hwinfo, &decfg, &chkstop) > 0)
				break;
		}
	if (dom > ap_max_domain_id) {
		/* Could not find one valid APQN for this adapter */
		if (ac) {
			AP_DBF_INFO("%s(%d) no type info (no APQN found), rm card and queue devs\n",
				    __func__, ap);
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
		} else {
			pr_debug("(%d) no type info (no APQN found), ignored\n",
				 ap);
		}
		return;
	}
	if (!hwinfo.at) {
		/* No adapter type info available, an unusable adapter */
		if (ac) {
			AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devs\n",
				    __func__, ap);
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
		} else {
			pr_debug("(%d) no valid type (0) info, ignored\n", ap);
		}
		return;
	}
	hwinfo.value &= TAPQ_CARD_HWINFO_MASK; /* filter card specific hwinfo */
	if (ac) {
		/* Check APQN against existing card device for changes */
		if (ac->hwinfo.at != hwinfo.at) {
			/* hwtype changed: rebuild the card device from scratch */
			AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devs\n",
				    __func__, ap, hwinfo.at);
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
			ac = NULL;
		} else if (ac->hwinfo.fac != hwinfo.fac) {
			/* function mask changed: rebuild as well */
			AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
				    __func__, ap, hwinfo.fac);
			ap_scan_rm_card_dev_and_queue_devs(ac);
			put_device(dev);
			ac = NULL;
		} else {
			/* handle checkstop state change */
			if (chkstop && !ac->chkstop) {
				/* checkstop on */
				ac->chkstop = true;
				AP_DBF_INFO("%s(%d) card dev checkstop on\n",
					    __func__, ap);
			} else if (!chkstop && ac->chkstop) {
				/* checkstop off */
				ac->chkstop = false;
				AP_DBF_INFO("%s(%d) card dev checkstop off\n",
					    __func__, ap);
			}
			/* handle config state change */
			if (decfg && ac->config) {
				ac->config = false;
				AP_DBF_INFO("%s(%d) card dev config off\n",
					    __func__, ap);
				ap_send_config_uevent(&ac->ap_dev, ac->config);
			} else if (!decfg && !ac->config) {
				ac->config = true;
				AP_DBF_INFO("%s(%d) card dev config on\n",
					    __func__, ap);
				ap_send_config_uevent(&ac->ap_dev, ac->config);
			}
		}
	}
	if (!ac) {
		/* Build a new card device */
		comp_type = ap_get_compatible_type(qid, hwinfo.at, hwinfo.fac);
		if (!comp_type) {
			AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
				    __func__, ap, hwinfo.at);
			return;
		}
		ac = ap_card_create(ap, hwinfo, comp_type);
		if (!ac) {
			AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
				    __func__, ap);
			return;
		}
		ac->config = !decfg;
		ac->chkstop = chkstop;
		dev = &ac->ap_dev.device;
		dev->bus = &ap_bus_type;
		dev->parent = ap_root_device;
		dev_set_name(dev, "card%02x", ap);
		/* maybe enlarge ap_max_msg_size to support this card */
		if (ac->maxmsgsize > atomic_read(&ap_max_msg_size)) {
			atomic_set(&ap_max_msg_size, ac->maxmsgsize);
			AP_DBF_INFO("%s(%d) ap_max_msg_size update to %d byte\n",
				    __func__, ap,
				    atomic_read(&ap_max_msg_size));
		}
		/* Register the new card device with AP bus */
		rc = device_register(dev);
		if (rc) {
			AP_DBF_WARN("%s(%d) device_register() failed\n",
				    __func__, ap);
			put_device(dev);
			return;
		}
		/*
		 * get it and thus adjust reference counter
		 * (balanced by the final put_device() below)
		 */
		get_device(dev);
		if (decfg)
			AP_DBF_INFO("%s(%d) new (decfg) card dev type=%d func=0x%08x created\n",
				    __func__, ap, hwinfo.at, hwinfo.fac);
		else if (chkstop)
			AP_DBF_INFO("%s(%d) new (chkstop) card dev type=%d func=0x%08x created\n",
				    __func__, ap, hwinfo.at, hwinfo.fac);
		else
			AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n",
				    __func__, ap, hwinfo.at, hwinfo.fac);
	}
	/* Verify the domains and the queue devices for this card */
	ap_scan_domains(ac);
	/* release the card device */
	put_device(&ac->ap_dev.device);
}
  1854. /**
  1855. * ap_get_configuration - get the host AP configuration
  1856. *
  1857. * Stores the host AP configuration information returned from the previous call
  1858. * to Query Configuration Information (QCI), then retrieves and stores the
  1859. * current AP configuration returned from QCI.
  1860. *
  1861. * Return: true if the host AP configuration changed between calls to QCI;
  1862. * otherwise, return false.
  1863. */
  1864. static bool ap_get_configuration(void)
  1865. {
  1866. if (!ap_qci_info->flags) /* QCI not supported */
  1867. return false;
  1868. memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
  1869. ap_qci(ap_qci_info);
  1870. return memcmp(ap_qci_info, ap_qci_info_old,
  1871. sizeof(struct ap_config_info)) != 0;
  1872. }
  1873. /*
  1874. * ap_config_has_new_aps - Check current against old qci info if
  1875. * new adapters have appeared. Returns true if at least one new
  1876. * adapter in the apm mask is showing up. Existing adapters or
  1877. * receding adapters are not counted.
  1878. */
  1879. static bool ap_config_has_new_aps(void)
  1880. {
  1881. unsigned long m[BITS_TO_LONGS(AP_DEVICES)];
  1882. if (!ap_qci_info->flags)
  1883. return false;
  1884. bitmap_andnot(m, (unsigned long *)ap_qci_info->apm,
  1885. (unsigned long *)ap_qci_info_old->apm, AP_DEVICES);
  1886. if (!bitmap_empty(m, AP_DEVICES))
  1887. return true;
  1888. return false;
  1889. }
  1890. /*
  1891. * ap_config_has_new_doms - Check current against old qci info if
  1892. * new (usage) domains have appeared. Returns true if at least one
  1893. * new domain in the aqm mask is showing up. Existing domains or
  1894. * receding domains are not counted.
  1895. */
  1896. static bool ap_config_has_new_doms(void)
  1897. {
  1898. unsigned long m[BITS_TO_LONGS(AP_DOMAINS)];
  1899. if (!ap_qci_info->flags)
  1900. return false;
  1901. bitmap_andnot(m, (unsigned long *)ap_qci_info->aqm,
  1902. (unsigned long *)ap_qci_info_old->aqm, AP_DOMAINS);
  1903. if (!bitmap_empty(m, AP_DOMAINS))
  1904. return true;
  1905. return false;
  1906. }
/**
 * ap_scan_bus(): Scan the AP bus for new devices
 * Always run under mutex ap_scan_bus_mutex protection
 * which needs to get locked/unlocked by the caller!
 * Returns true if any config change has been detected
 * during the scan, otherwise false.
 */
static bool ap_scan_bus(void)
{
	bool config_changed;
	int ap;

	pr_debug(">\n");

	/* (re-)fetch configuration via QCI */
	config_changed = ap_get_configuration();
	if (config_changed) {
		if (ap_config_has_new_aps() || ap_config_has_new_doms()) {
			/*
			 * Appearance of new adapters and/or domains need to
			 * build new ap devices which need to get bound to an
			 * device driver. Thus reset the APQN bindings complete
			 * completion.
			 */
			reinit_completion(&ap_apqn_bindings_complete);
		}
		/* post a config change notify */
		notify_config_changed();
	}
	ap_select_domain();

	/* loop over all possible adapters */
	for (ap = 0; ap <= ap_max_adapter_id; ap++)
		ap_scan_adapter(ap);

	/* scan complete notify */
	if (config_changed)
		notify_scan_complete();

	/* check if there is at least one queue available with default domain */
	if (ap_domain_index >= 0) {
		struct device *dev =
			bus_find_device(&ap_bus_type, NULL,
					(void *)(long)ap_domain_index,
					__match_queue_device_with_queue_id);
		if (dev)
			put_device(dev);
		else
			AP_DBF_INFO("%s no queue device with default domain %d available\n",
				    __func__, ap_domain_index);
	}

	/* emit the init-scan-done uevent exactly once, on the first scan */
	if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
		pr_debug("init scan complete\n");
		ap_send_init_scan_done_uevent();
	}

	ap_check_bindings_complete();

	/* re-arm the periodic scan timer */
	mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);

	pr_debug("< config_changed=%d\n", config_changed);

	return config_changed;
}
/*
 * Callback for the ap_scan_bus_timer
 * Runs periodically, workqueue timer (ap_scan_bus_time)
 */
static void ap_scan_bus_timer_callback(struct timer_list *unused)
{
	/*
	 * schedule work into the system long wq which when
	 * the work is finally executed, calls the AP bus scan.
	 */
	queue_work(system_long_wq, &ap_scan_bus_work);
}
  1974. /*
  1975. * Callback for the ap_scan_bus_work
  1976. */
  1977. static void ap_scan_bus_wq_callback(struct work_struct *unused)
  1978. {
  1979. /*
  1980. * Try to invoke an ap_scan_bus(). If the mutex acquisition
  1981. * fails there is currently another task already running the
  1982. * AP scan bus and there is no need to wait and re-trigger the
  1983. * scan again. Please note at the end of the scan bus function
  1984. * the AP scan bus timer is re-armed which triggers then the
  1985. * ap_scan_bus_timer_callback which enqueues a work into the
  1986. * system_long_wq which invokes this function here again.
  1987. */
  1988. if (mutex_trylock(&ap_scan_bus_mutex)) {
  1989. ap_scan_bus_task = current;
  1990. ap_scan_bus_result = ap_scan_bus();
  1991. ap_scan_bus_task = NULL;
  1992. mutex_unlock(&ap_scan_bus_mutex);
  1993. }
  1994. }
/*
 * Tear down all asynchronous machinery (poll thread, notifier,
 * scan work and timers). Mirrors ap_async_init() in reverse order.
 */
static inline void __exit ap_async_exit(void)
{
	if (ap_thread_flag)
		ap_poll_thread_stop();
	chsc_notifier_unregister(&ap_bus_nb);
	cancel_work(&ap_scan_bus_work);
	hrtimer_cancel(&ap_poll_timer);
	timer_delete(&ap_scan_bus_timer);
}
/*
 * Set up all asynchronous machinery: the AP bus rescan timer, the high
 * resolution poll timer, the initial bus scan work, the CHSC notifier
 * and (optionally) the poll thread.
 * Returns 0 on success or a negative errno; on failure everything that
 * was already set up is torn down again via the goto cleanup chain.
 */
static inline int __init ap_async_init(void)
{
	int rc;

	/* Setup the AP bus rescan timer. */
	timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0);

	/*
	 * Setup the high resolution poll timer.
	 * If we are running under z/VM adjust polling to z/VM polling rate.
	 */
	if (MACHINE_IS_VM)
		poll_high_timeout = 1500000;
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* kick off the initial AP bus scan */
	queue_work(system_long_wq, &ap_scan_bus_work);

	rc = chsc_notifier_register(&ap_bus_nb);
	if (rc)
		goto out;

	/* Start the low priority AP bus poll thread. */
	if (!ap_thread_flag)
		return 0;

	rc = ap_poll_thread_start();
	if (rc)
		goto out_notifier;

	return 0;

out_notifier:
	chsc_notifier_unregister(&ap_bus_nb);
out:
	cancel_work(&ap_scan_bus_work);
	hrtimer_cancel(&ap_poll_timer);
	timer_delete(&ap_scan_bus_timer);
	return rc;
}
/* Unregister the adapter interrupt if it was registered by ap_irq_init(). */
static inline void ap_irq_exit(void)
{
	if (ap_irq_flag)
		unregister_adapter_interrupt(&ap_airq);
}
  2041. static inline int __init ap_irq_init(void)
  2042. {
  2043. int rc;
  2044. if (!ap_interrupts_available() || !ap_useirq)
  2045. return 0;
  2046. rc = register_adapter_interrupt(&ap_airq);
  2047. ap_irq_flag = (rc == 0);
  2048. return rc;
  2049. }
/* Unregister the AP bus s390 debug feature entry. */
static inline void ap_debug_exit(void)
{
	debug_unregister(ap_dbf_info);
}
  2054. static inline int __init ap_debug_init(void)
  2055. {
  2056. ap_dbf_info = debug_register("ap", 2, 1,
  2057. AP_DBF_MAX_SPRINTF_ARGS * sizeof(long));
  2058. debug_register_view(ap_dbf_info, &debug_sprintf_view);
  2059. debug_set_level(ap_dbf_info, DBF_ERR);
  2060. return 0;
  2061. }
/*
 * Initialize the AP permission masks (ioctl, adapter and domain masks).
 * Default is "everything allowed"; the apm= and aqm= kernel parameter
 * strings, when given, replace the respective default mask.
 */
static void __init ap_perms_init(void)
{
	/* all resources usable if no kernel parameter string given */
	memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
	memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
	memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));

	/* apm kernel parameter string */
	if (apm_str) {
		/* start from an empty mask, then parse the user string */
		memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
		ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
				  &ap_perms_mutex);
	}

	/* aqm kernel parameter string */
	if (aqm_str) {
		/* start from an empty mask, then parse the user string */
		memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
		ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
				  &ap_perms_mutex);
	}
}
  2081. /**
  2082. * ap_module_init(): The module initialization code.
  2083. *
  2084. * Initializes the module.
  2085. */
  2086. static int __init ap_module_init(void)
  2087. {
  2088. int rc;
  2089. rc = ap_debug_init();
  2090. if (rc)
  2091. return rc;
  2092. if (!ap_instructions_available()) {
  2093. pr_warn("The hardware system does not support AP instructions\n");
  2094. return -ENODEV;
  2095. }
  2096. /* init ap_queue hashtable */
  2097. hash_init(ap_queues);
  2098. /* set up the AP permissions (ioctls, ap and aq masks) */
  2099. ap_perms_init();
  2100. /* Get AP configuration data if available */
  2101. ap_init_qci_info();
  2102. /* check default domain setting */
  2103. if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
  2104. (ap_domain_index >= 0 &&
  2105. !test_bit_inv(ap_domain_index, ap_perms.aqm))) {
  2106. pr_warn("%d is not a valid cryptographic domain\n",
  2107. ap_domain_index);
  2108. ap_domain_index = -1;
  2109. }
  2110. /* Create /sys/bus/ap. */
  2111. rc = bus_register(&ap_bus_type);
  2112. if (rc)
  2113. goto out;
  2114. /* Create /sys/devices/ap. */
  2115. ap_root_device = root_device_register("ap");
  2116. rc = PTR_ERR_OR_ZERO(ap_root_device);
  2117. if (rc)
  2118. goto out_bus;
  2119. ap_root_device->bus = &ap_bus_type;
  2120. /* enable interrupts if available */
  2121. rc = ap_irq_init();
  2122. if (rc)
  2123. goto out_device;
  2124. /* Setup asynchronous work (timers, workqueue, etc). */
  2125. rc = ap_async_init();
  2126. if (rc)
  2127. goto out_irq;
  2128. return 0;
  2129. out_irq:
  2130. ap_irq_exit();
  2131. out_device:
  2132. root_device_unregister(ap_root_device);
  2133. out_bus:
  2134. bus_unregister(&ap_bus_type);
  2135. out:
  2136. ap_debug_exit();
  2137. return rc;
  2138. }
/*
 * Module exit: tear everything down in strict reverse order of
 * ap_module_init().
 */
static void __exit ap_module_exit(void)
{
	ap_async_exit();
	ap_irq_exit();
	root_device_unregister(ap_root_device);
	bus_unregister(&ap_bus_type);
	ap_debug_exit();
}

module_init(ap_module_init);
module_exit(ap_module_exit);