vfio_ap_ops.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Adjunct processor matrix VFIO device driver callbacks.
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
 *	      Halil Pasic <pasic@linux.ibm.com>
 *	      Pierre Morel <pmorel@linux.ibm.com>
 */
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>

#include "vfio_ap_private.h"
#include "vfio_ap_debug.h"

#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

#define AP_QUEUE_ASSIGNED "assigned"
#define AP_QUEUE_UNASSIGNED "unassigned"
#define AP_QUEUE_IN_USE "in use"

#define AP_RESET_INTERVAL	20	/* Reset sleep interval (20ms) */

static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);

/**
 * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
 *			     KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. kvm->lock:	       required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @kvm is NULL, the KVM lock will not be taken.
 */
static inline void get_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (kvm)
		mutex_lock(&kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_kvm: Release the locks used to dynamically update a
 *				 KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @kvm is NULL, the KVM lock will not be released.
 */
static inline void release_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (kvm)
		mutex_unlock(&kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}
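
/*
 * Illustrative sketch (not from the original source): a dynamic APCB update
 * is typically bracketed by the helpers above, e.g.:
 *
 *	get_update_locks_for_kvm(kvm);
 *	... recompute the shadow APCB and push it to the guest via
 *	kvm_arch_crypto_set_masks() ...
 *	release_update_locks_for_kvm(kvm);
 *
 * Taking guests_lock -> kvm->lock -> mdevs_lock in this fixed order on every
 * update path is what prevents lock-order inversions between them.
 */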
/**
 * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
 *			      KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be taken.
 */
static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}

/**
 * release_update_locks_for_mdev: Release the locks used to dynamically update a
 *				  KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. matrix_mdev->kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be released.
 */
static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_unlock(&matrix_mdev->kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}

/**
 * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
 *			     acquire the locks required to update the APCB of
 *			     the KVM guest to which the mdev is attached.
 *
 * @apqn: the APQN of a queue device.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev
 *
 * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
 *	 will not be taken.
 *
 * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
 *	   is not assigned to an ap_matrix_mdev.
 */
static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
		    test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
			if (matrix_mdev->kvm)
				mutex_lock(&matrix_mdev->kvm->lock);

			mutex_lock(&matrix_dev->mdevs_lock);

			return matrix_mdev;
		}
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	return NULL;
}

/**
 * get_update_locks_for_queue: get the locks required to update the APCB of the
 *			       KVM guest to which the matrix mdev linked to a
 *			       vfio_ap_queue object is attached.
 *
 * @q: a pointer to a vfio_ap_queue object.
 *
 * The proper locking order is:
 * 1. q->matrix_dev->guests_lock: required to use the KVM pointer to update a
 *				  KVM guest's APCB.
 * 2. q->matrix_mdev->kvm->lock:  required to update a guest's APCB
 * 3. matrix_dev->mdevs_lock:	  required to access data stored in matrix_mdev
 *
 * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock will
 *	 not be taken.
 */
static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (q->matrix_mdev && q->matrix_mdev->kvm)
		mutex_lock(&q->matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}
/**
 * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
 *			    hash table of queues assigned to a matrix mdev
 * @matrix_mdev: the matrix mdev
 * @apqn: The APQN of a queue device
 *
 * Return: the pointer to the vfio_ap_queue struct representing the queue or
 *	   NULL if the queue is not assigned to @matrix_mdev
 */
static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
					struct ap_matrix_mdev *matrix_mdev,
					int apqn)
{
	struct vfio_ap_queue *q;

	hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
			       apqn) {
		if (q && q->apqn == apqn)
			return q;
	}

	return NULL;
}

/**
 * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
 * @apqn: The AP Queue number
 *
 * Checks the IRQ bit for the status of this APQN using ap_tapq.
 * Returns if the ap_tapq function succeeded and the bit is clear.
 * Returns if the ap_tapq function failed with an invalid, deconfigured or
 * checkstopped AP.
 * Otherwise retries up to 5 times after waiting 20ms.
 */
static void vfio_ap_wait_for_irqclear(int apqn)
{
	struct ap_queue_status status;
	int retry = 5;

	do {
		status = ap_tapq(apqn, NULL);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			if (!status.irq_enabled)
				return;
			fallthrough;
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		default:
			WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
				  status.response_code, apqn);
			return;
		}
	} while (--retry);

	WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
		  __func__, status.response_code, apqn);
}
/**
 * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
 * @q: The vfio_ap_queue
 *
 * Unregisters the ISC in the GIB when the saved ISC is not invalid.
 * Unpins the guest's page holding the NIB when it exists.
 * Resets the saved_iova and saved_isc to invalid values.
 */
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
	if (!q)
		return;
	if (q->saved_isc != VFIO_AP_ISC_INVALID &&
	    !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
		kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
		q->saved_isc = VFIO_AP_ISC_INVALID;
	}
	if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
		q->saved_iova = 0;
	}
}
/**
 * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
 * @q: The vfio_ap_queue
 *
 * Uses ap_aqic to disable the interruption and, in case of success, reset
 * in progress or IRQ disable command already processed, calls
 * vfio_ap_wait_for_irqclear() to check for the IRQ bit to be clear
 * and calls vfio_ap_free_aqic_resources() to free the resources associated
 * with the AP interrupt handling.
 *
 * In the case the AP is busy, or a reset is in progress,
 * retries after 20ms, up to 5 times.
 *
 * Returns if the ap_aqic function failed with an invalid, deconfigured or
 * checkstopped AP.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
	union ap_qirq_ctrl aqic_gisa = { .value = 0 };
	struct ap_queue_status status;
	int retries = 5;

	do {
		status = ap_aqic(q->apqn, aqic_gisa, 0);
		switch (status.response_code) {
		case AP_RESPONSE_OTHERWISE_CHANGED:
		case AP_RESPONSE_NORMAL:
			vfio_ap_wait_for_irqclear(q->apqn);
			goto end_free;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
		default:
			/* All cases in default means AP not operational */
			WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
				  status.response_code);
			goto end_free;
		}
	} while (retries--);

	WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
		  status.response_code);
end_free:
	vfio_ap_free_aqic_resources(q);
	return status;
}
/**
 * vfio_ap_validate_nib - validate a notification indicator byte (nib) address.
 *
 * @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
 * @nib: the location for storing the nib address.
 *
 * When the PQAP(AQIC) instruction is executed, general register 2 contains the
 * address of the notification indicator byte (nib) used for IRQ notification.
 * This function parses and validates the nib from gr2.
 *
 * Return: returns zero if the nib address is valid; otherwise, returns
 *	   -EINVAL.
 */
static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
	*nib = vcpu->run->s.regs.gprs[2];

	if (!*nib)
		return -EINVAL;
	if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
		return -EINVAL;

	return 0;
}

static int ensure_nib_shared(unsigned long addr, struct gmap *gmap)
{
	int ret;

	/*
	 * The nib has to be located in shared storage since guest and
	 * host access it. vfio_pin_pages() will do a pin shared and
	 * if that fails (possibly because it's not a shared page) it
	 * calls export. We try to do a second pin shared here so that
	 * the UV gives us an error code if we try to pin a non-shared
	 * page.
	 *
	 * If the page is already pinned shared the UV will return success.
	 */
	ret = uv_pin_shared(addr);
	if (ret) {
		/* vfio_pin_pages() likely exported the page so let's re-import */
		gmap_convert_to_secure(gmap, addr);
	}

	return ret;
}
/**
 * vfio_ap_irq_enable - Enable Interruption for an APQN
 *
 * @q: the vfio_ap_queue holding AQIC parameters
 * @isc: the guest ISC to register with the GIB interface
 * @vcpu: the vcpu object containing the registers specifying the parameters
 *	  passed to the PQAP(AQIC) instruction.
 *
 * Pin the NIB saved in *q
 * Register the guest ISC to GIB interface and retrieve the
 * host ISC to issue the host side PQAP/AQIC
 *
 * status.response_code may be set to AP_RESPONSE_INVALID_ADDRESS in case the
 * vfio_pin_pages or kvm_s390_gisc_register failed.
 *
 * Otherwise return the ap_queue_status returned by the ap_aqic();
 * all retry handling will be done by the guest.
 *
 * Return: &struct ap_queue_status
 */
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
						 int isc,
						 struct kvm_vcpu *vcpu)
{
	union ap_qirq_ctrl aqic_gisa = { .value = 0 };
	struct ap_queue_status status = {};
	struct kvm_s390_gisa *gisa;
	struct page *h_page;
	int nisc;
	struct kvm *kvm;
	phys_addr_t h_nib;
	dma_addr_t nib;
	int ret;

	/* Verify that the notification indicator byte address is valid */
	if (vfio_ap_validate_nib(vcpu, &nib)) {
		VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
				 __func__, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
			     IOMMU_READ | IOMMU_WRITE, &h_page);
	switch (ret) {
	case 1:
		break;
	default:
		VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
				 "nib=%pad, apqn=%#04x\n",
				 __func__, ret, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	kvm = q->matrix_mdev->kvm;
	gisa = kvm->arch.gisa_int.origin;

	h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
	aqic_gisa.gisc = isc;

	/* NIB in non-shared storage is a rc 6 for PV guests */
	if (kvm_s390_pv_cpu_is_protected(vcpu) &&
	    ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	nisc = kvm_s390_gisc_register(kvm, isc);
	if (nisc < 0) {
		VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
				 __func__, nisc, isc, q->apqn);

		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	aqic_gisa.isc = nisc;
	aqic_gisa.ir = 1;
	aqic_gisa.gisa = virt_to_phys(gisa) >> 4;

	status = ap_aqic(q->apqn, aqic_gisa, h_nib);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* See if we did clear older IRQ configuration */
		vfio_ap_free_aqic_resources(q);
		q->saved_iova = nib;
		q->saved_isc = isc;
		break;
	case AP_RESPONSE_OTHERWISE_CHANGED:
		/* We could not modify IRQ settings: clear new configuration */
		ret = kvm_s390_gisc_unregister(kvm, isc);
		if (ret)
			VFIO_AP_DBF_WARN("%s: kvm_s390_gisc_unregister: rc=%d isc=%d, apqn=%#04x\n",
					 __func__, ret, isc, q->apqn);
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		break;
	default:
		pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
			status.response_code);
		vfio_ap_irq_disable(q);
		break;
	}

	if (status.response_code != AP_RESPONSE_NORMAL) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: "
				 "zone=%#x, ir=%#x, gisc=%#x, f=%#x,"
				 "gisa=%#x, isc=%#x, apqn=%#04x\n",
				 __func__, status.response_code,
				 aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
				 aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc,
				 q->apqn);
	}

	return status;
}
/**
 * vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an array
 *				of big endian elements that can be passed by
 *				value to an s390dbf sprintf event function to
 *				format a UUID string.
 *
 * @guid: the object containing the little endian guid
 * @uuid: a six-element array of long values that can be passed by value as
 *	  arguments for a formatting string specifying a UUID.
 *
 * The S390 Debug Feature (s390dbf) allows the use of "%s" in the sprintf
 * event functions only if the memory for the passed string remains available
 * as long as the debug feature exists. Since a mediated device can be removed
 * at any time, its name cannot be used because %s passes the reference to the
 * string in memory and the reference will go stale once the device is removed.
 *
 * The s390dbf string formatting function allows a maximum of 9 arguments for a
 * message to be displayed in the 'sprintf' view. In order to use the bytes
 * comprising the mediated device's UUID to display the mediated device name,
 * they will have to be converted into an array whose elements can be passed by
 * value to sprintf. For example:
 *
 * guid array: { 83, 78, 17, 62, bb, f1, f0, 47, 91, 4d, 32, a2, 2e, 3a, 88, 04 }
 * mdev name: 62177883-f1bb-47f0-914d-32a22e3a8804
 * array returned: { 62177883, f1bb, 47f0, 914d, 32a2, 2e3a8804 }
 * formatting string: "%08lx-%04lx-%04lx-%04lx-%04lx%08lx"
 */
static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
{
	/*
	 * The input guid is ordered in little endian, so it needs to be
	 * reordered for displaying a UUID as a string. This specifies the
	 * guid indices in proper order.
	 */
	uuid[0] = le32_to_cpup((__le32 *)guid);
	uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]);
	uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]);
	uuid[3] = *((__u16 *)&guid->b[8]);
	uuid[4] = *((__u16 *)&guid->b[10]);
	uuid[5] = *((__u32 *)&guid->b[12]);
}
/**
 * handle_pqap - PQAP instruction callback
 *
 * @vcpu: The vcpu on which we received the PQAP instruction
 *
 * Get the general register contents to initialize internal variables.
 * REG[0]: APQN
 * REG[1]: IR and ISC
 * REG[2]: NIB
 *
 * Response.status may be set to the following response codes:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
 * - AP_RESPONSE_NORMAL (0): in case of success
 * Check vfio_ap_setirq() and vfio_ap_clrirq() for other possible RC.
 * We take the matrix_dev lock to ensure serialization on queues and
 * mediated device access.
 *
 * Return: 0 if we could handle the request inside KVM.
 *	   Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	uint64_t status;
	uint16_t apqn;
	unsigned long uuid[6];
	struct vfio_ap_queue *q;
	struct ap_queue_status qstatus = {
			       .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
	struct ap_matrix_mdev *matrix_mdev;

	apqn = vcpu->run->s.regs.gprs[0] & 0xffff;

	/* If we do not use the AIV facility just go to userland */
	if (!(vcpu->arch.sie_block->eca & ECA_AIV)) {
		VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n",
				 __func__, apqn, vcpu->arch.sie_block->eca);

		return -EOPNOTSUPP;
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	if (!vcpu->kvm->arch.crypto.pqap_hook) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
				 __func__, apqn);

		goto out_unlock;
	}

	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
				   struct ap_matrix_mdev, pqap_hook);

	/* If there is no guest using the mdev, there is nothing to do */
	if (!matrix_mdev->kvm) {
		vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
		VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n",
				 __func__, uuid[0], uuid[1], uuid[2],
				 uuid[3], uuid[4], uuid[5], apqn);
		goto out_unlock;
	}

	q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
	if (!q) {
		VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
				 __func__, AP_QID_CARD(apqn),
				 AP_QID_QUEUE(apqn));
		goto out_unlock;
	}

	status = vcpu->run->s.regs.gprs[1];

	/* If IR bit(16) is set we enable the interrupt */
	if ((status >> (63 - 16)) & 0x01)
		qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
	else
		qstatus = vfio_ap_irq_disable(q);

out_unlock:
	memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
	vcpu->run->s.regs.gprs[1] >>= 32;
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;
}
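
/*
 * Illustrative note (not from the original source): on the out_unlock path
 * above, the 4-byte ap_queue_status is copied into the most significant
 * bytes of the 64-bit GR1 (s390 is big-endian) and then shifted right by
 * 32, so the guest reads the AP queue status word in bits 32-63 of GR1.
 */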
static void vfio_ap_matrix_init(struct ap_config_info *info,
				struct ap_matrix *matrix)
{
	matrix->apm_max = info->apxa ? info->na : 63;
	matrix->aqm_max = info->apxa ? info->nd : 15;
	matrix->adm_max = info->apxa ? info->nd : 15;
}

static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
	if (matrix_mdev->kvm)
		kvm_arch_crypto_set_masks(matrix_mdev->kvm,
					  matrix_mdev->shadow_apcb.apm,
					  matrix_mdev->shadow_apcb.aqm,
					  matrix_mdev->shadow_apcb.adm);
}

static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
{
	DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);

	bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
	bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
		   (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);

	return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
			     AP_DOMAINS);
}

static bool _queue_passable(struct vfio_ap_queue *q)
{
	if (!q)
		return false;

	switch (q->reset_status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
		return true;
	default:
		return false;
	}
}
/*
 * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
 *				to ensure no queue devices are passed through to
 *				the guest that are not bound to the vfio_ap
 *				device driver.
 *
 * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
 * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
 *		  guest's AP configuration that are still in the host's AP
 *		  configuration.
 *
 * Note: If an APQN references a queue device that is not bound to the vfio_ap
 *	 driver, its APID will be filtered from the guest's APCB. The matrix
 *	 structure precludes filtering an individual APQN, so its APID will be
 *	 filtered. Consequently, all queues associated with the adapter that
 *	 are in the host's AP configuration must be reset. If queues are
 *	 subsequently made available again to the guest, they should re-appear
 *	 in a reset state.
 *
 * Return: a boolean value indicating whether the KVM guest's APCB was changed
 *	   by the filtering or not.
 */
static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long *apm_filtered)
{
	unsigned long apid, apqi, apqn;
	DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
	DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);

	bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
	bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	bitmap_clear(apm_filtered, 0, AP_DEVICES);

	/*
	 * Copy the adapters, domains and control domains to the shadow_apcb
	 * from the matrix mdev, but only those that are assigned to the host's
	 * AP configuration.
	 */
	bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
		   (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
	bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
		   (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);

	for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
				     AP_DOMAINS) {
			/*
			 * If the APQN is not bound to the vfio_ap device
			 * driver, then we can't assign it to the guest's
			 * AP configuration. The AP architecture won't
			 * allow filtering of a single APQN, so let's filter
			 * the APID since an adapter represents a physical
			 * hardware device.
			 */
			apqn = AP_MKQID(apid, apqi);
			if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) {
				clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);

				/*
				 * If the adapter was previously plugged into
				 * the guest, let's let the caller know that
				 * the APID was filtered.
				 */
				if (test_bit_inv(apid, prev_shadow_apm))
					set_bit_inv(apid, apm_filtered);

				break;
			}
		}
	}

	return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
			     AP_DEVICES) ||
	       !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
			     AP_DOMAINS);
}
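
/*
 * Illustrative example (not from the original source): assume the mdev is
 * assigned adapter 4 and domains 5 and 6. If queue 04.0005 is bound to the
 * vfio_ap driver but 04.0006 is not, the loop above clears APID 4 from the
 * shadow APCB (a single APQN cannot be filtered) and, if adapter 4 was
 * previously plugged into the guest, records it in @apm_filtered so the
 * caller can reset all of adapter 4's queues.
 */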
static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	matrix_mdev->mdev = to_mdev_device(vdev->dev);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
	matrix_mdev->pqap_hook = handle_pqap;
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	hash_init(matrix_mdev->qtable.queues);

	return 0;
}

static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev;
	int ret;

	matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
					&vfio_ap_matrix_dev_ops);
	if (IS_ERR(matrix_mdev))
		return PTR_ERR(matrix_mdev);

	ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
	if (ret)
		goto err_put_vdev;
	matrix_mdev->req_trigger = NULL;
	dev_set_drvdata(&mdev->dev, matrix_mdev);
	mutex_lock(&matrix_dev->mdevs_lock);
	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;

err_put_vdev:
	vfio_put_device(&matrix_mdev->vdev);
	return ret;
}

static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
				    struct vfio_ap_queue *q)
{
	if (!q || vfio_ap_mdev_get_queue(matrix_mdev, q->apqn))
		return;

	q->matrix_mdev = matrix_mdev;
	hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
}

static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
	struct vfio_ap_queue *q;

	q = vfio_ap_find_queue(apqn);
	vfio_ap_mdev_link_queue(matrix_mdev, q);
}

static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
{
	hash_del(&q->mdev_qnode);
}

static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
{
	q->matrix_mdev = NULL;
}

static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
{
	struct vfio_ap_queue *q;
	unsigned long apid, apqi;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
				     AP_DOMAINS) {
			q = vfio_ap_mdev_get_queue(matrix_mdev,
						   AP_MKQID(apid, apqi));
			if (q)
				q->matrix_mdev = NULL;
		}
	}
}
static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&matrix_mdev->vdev);

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	vfio_ap_mdev_reset_queues(matrix_mdev);
	vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
	list_del(&matrix_mdev->node);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);
	vfio_put_device(&matrix_mdev->vdev);
}

#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
			 "already assigned to %s"

static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
					 unsigned long *apm,
					 unsigned long *aqm)
{
	unsigned long apid, apqi;
	const struct device *dev = mdev_dev(matrix_mdev->mdev);
	const char *mdev_name = dev_name(dev);

	for_each_set_bit_inv(apid, apm, AP_DEVICES)
		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
			dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}

/**
 * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
 *
 * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
 * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
 *
 * Verifies that each APQN derived from the Cartesian product of a bitmap of
 * AP adapter IDs and AP queue indexes is not configured for any matrix
 * mediated device. AP queue sharing is not allowed.
 *
 * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
 */
static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
					  unsigned long *mdev_aqm)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		/*
		 * If the input apm and aqm are fields of the matrix_mdev
		 * object, then move on to the next matrix_mdev.
		 */
		if (mdev_apm == matrix_mdev->matrix.apm &&
		    mdev_aqm == matrix_mdev->matrix.aqm)
			continue;

		memset(apm, 0, sizeof(apm));
		memset(aqm, 0, sizeof(aqm));

		/*
		 * We work on full longs, as we can only exclude the leftover
		 * bits in non-inverse order. The leftover is all zeros.
		 */
		if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
				AP_DEVICES))
			continue;

		if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
				AP_DOMAINS))
			continue;

		vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);

		return -EADDRINUSE;
	}

	return 0;
}
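
/*
 * Illustrative example (not from the original source): sharing is checked
 * over the Cartesian product of the masks. If one mdev has APM = {5} and
 * AQM = {6}, a second mdev that assigns both APID 5 and APQI 6 would claim
 * APQN 05.0006 and is rejected with -EADDRINUSE, while assigning only
 * APID 5 with a disjoint set of APQIs is allowed.
 */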
/**
 * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
 *				 not reserved for the default zcrypt driver and
 *				 are not assigned to another mdev.
 *
 * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
 *
 * Return: One of the following values:
 * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
 *   most likely -EBUSY indicating the ap_perms_mutex lock is already held.
 * o EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
 *   zcrypt default driver.
 * o EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev
 * o A zero indicating validation succeeded.
 */
static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
{
	if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
					       matrix_mdev->matrix.aqm))
		return -EADDRNOTAVAIL;

	return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
					      matrix_mdev->matrix.aqm);
}

static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
				      unsigned long apid)
{
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
				    unsigned long apid,
				    struct list_head *qlist)
{
	struct vfio_ap_queue *q;
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
		q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
		if (q)
			list_add_tail(&q->reset_qnode, qlist);
	}
}

static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
				  unsigned long apid)
{
	struct list_head qlist;

	INIT_LIST_HEAD(&qlist);
	collect_queues_to_reset(matrix_mdev, apid, &qlist);
	vfio_ap_mdev_reset_qlist(&qlist);
}

static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
				  unsigned long *apm_reset)
{
	struct list_head qlist;
	unsigned long apid;

	if (bitmap_empty(apm_reset, AP_DEVICES))
		return 0;

	INIT_LIST_HEAD(&qlist);

	for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
		collect_queues_to_reset(matrix_mdev, apid, &qlist);

	return vfio_ap_mdev_reset_qlist(&qlist);
}
/**
 * assign_adapter_store - parses the APID from @buf and sets the
 *			  corresponding bit in the mediated matrix device's APM
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's assign_adapter attribute
 * @buf: a buffer containing the AP adapter number (APID) to
 *	 be assigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 *	   returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APID is not a valid number
 *
 *	2. -ENODEV
 *	   The APID exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APQIs have yet been assigned, the APID is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   A lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_adapter_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apid, matrix_mdev->matrix.apm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apid, matrix_mdev->matrix.apm);
		goto done;
	}

	vfio_ap_mdev_link_adapter(matrix_mdev, apid);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
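
/*
 * Illustrative usage sketch (the sysfs path is an assumption based on the
 * vfio_ap documentation, not on this file): userspace assigns adapter 0x05
 * by writing its APID to the mdev's assign_adapter attribute:
 *
 *	echo 0x05 > /sys/devices/vfio_ap/matrix/<mdev-uuid>/assign_adapter
 *
 * The store function above validates the APID, links the affected queues
 * and, if the guest's APCB changed, hot-plugs the adapter into the guest.
 */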
static struct vfio_ap_queue
*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
			     unsigned long apid, unsigned long apqi)
{
	struct vfio_ap_queue *q = NULL;

	q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
	/* If the queue is assigned to the matrix mdev, unlink it. */
	if (q)
		vfio_ap_unlink_queue_fr_mdev(q);

	return q;
}

/**
 * vfio_ap_mdev_unlink_adapter - unlink all queues associated with unassigned
 *				 adapter from the matrix mdev to which the
 *				 adapter was assigned.
 * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
 * @apid: the APID of the unassigned adapter.
 * @qlist: list for storing queues associated with unassigned adapter that
 *	   need to be reset.
 */
static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
					unsigned long apid,
					struct list_head *qlist)
{
	unsigned long apqi;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qlist) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				list_add_tail(&q->reset_qnode, qlist);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_adapters(struct ap_matrix_mdev *matrix_mdev,
					     unsigned long *apids)
{
	struct vfio_ap_queue *q, *tmpq;
	struct list_head qlist;
	unsigned long apid;
	bool apcb_update = false;

	INIT_LIST_HEAD(&qlist);

	for_each_set_bit_inv(apid, apids, AP_DEVICES) {
		vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);

		if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
			clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
			apcb_update = true;
		}
	}

	/* Only update apcb if needed to avoid impacting guest */
	if (apcb_update)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	vfio_ap_mdev_reset_qlist(&qlist);

	list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		list_del(&q->reset_qnode);
	}
}

static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
					    unsigned long apid)
{
	DECLARE_BITMAP(apids, AP_DEVICES);

	bitmap_zero(apids, AP_DEVICES);
	set_bit_inv(apid, apids);
	vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, apids);
}

/**
 * unassign_adapter_store - parses the APID from @buf and clears the
 *			    corresponding bit in the mediated matrix device's
 *			    APM
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's unassign_adapter attribute
 * @buf: a buffer containing the adapter number (APID) to be unassigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 *	   returns one of the following errors:
 *	   -EINVAL if the APID is not a number
 *	   -ENODEV if the APID exceeds the maximum value configured for the
 *		   system
 */
static ssize_t unassign_adapter_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
	vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);
static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
				     unsigned long apqi)
{
	unsigned long apid;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}

/**
 * assign_domain_store - parses the APQI from @buf and sets the
 *			 corresponding bit in the mediated matrix device's AQM
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's assign_domain attribute
 * @buf: a buffer containing the AP queue index (APQI) of the domain to
 *	 be assigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise returns
 *	   one of the following errors:
 *
 *	1. -EINVAL
 *	   The APQI is not a valid number
 *
 *	2. -ENODEV
 *	   The APQI exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APIDs have yet been assigned, the APQI is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is being used by another mediated
 *	   matrix device
 *
 *	5. -EAGAIN
 *	   The lock required to validate the mdev's AP configuration could not
 *	   be obtained.
 */
static ssize_t assign_domain_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apqi, matrix_mdev->matrix.aqm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
		goto done;
	}

	vfio_ap_mdev_link_domain(matrix_mdev, apqi);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_domain);
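
/*
 * Illustrative note (not from the original source): the APQNs granted to
 * the guest are the Cartesian product of the assigned adapters and domains.
 * For example (attribute paths as in the sketch above), assigning APIDs 1
 * and 2 and APQIs 4 and 5 yields queues 01.0004, 01.0005, 02.0004 and
 * 02.0005:
 *
 *	echo 1 > assign_adapter
 *	echo 2 > assign_adapter
 *	echo 4 > assign_domain
 *	echo 5 > assign_domain
 */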
static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long apqi,
				       struct list_head *qlist)
{
	unsigned long apid;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qlist) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				list_add_tail(&q->reset_qnode, qlist);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_domains(struct ap_matrix_mdev *matrix_mdev,
					    unsigned long *apqis)
{
	struct vfio_ap_queue *q, *tmpq;
	struct list_head qlist;
	unsigned long apqi;
	bool apcb_update = false;

	INIT_LIST_HEAD(&qlist);

	for_each_set_bit_inv(apqi, apqis, AP_DOMAINS) {
		vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);

		if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
			clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
			apcb_update = true;
		}
	}

	/* Only update apcb if needed to avoid impacting guest */
	if (apcb_update)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	vfio_ap_mdev_reset_qlist(&qlist);

	list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		list_del(&q->reset_qnode);
	}
}

static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
					   unsigned long apqi)
{
	DECLARE_BITMAP(apqis, AP_DOMAINS);

	/* The bitmap holds APQIs, so size it by AP_DOMAINS, not AP_DEVICES */
	bitmap_zero(apqis, AP_DOMAINS);
	set_bit_inv(apqi, apqis);
	vfio_ap_mdev_hot_unplug_domains(matrix_mdev, apqis);
}

/**
 * unassign_domain_store - parses the APQI from @buf and clears the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's unassign_domain attribute
 * @buf: a buffer containing the AP queue index (APQI) of the domain to
 *	 be unassigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APQI is not a number
 *	-ENODEV if the APQI exceeds the maximum value configured for the system
 */
static ssize_t unassign_domain_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
	vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_domain);
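
/*
 * Example (editor's sketch): the inverse operation mirrors assign_domain,
 *
 *	echo 0x47 > /sys/devices/vfio_ap/matrix/$UUID/unassign_domain
 *
 * and, via vfio_ap_mdev_hot_unplug_domain() above, hot unplugs the domain
 * from a running guest before the affected queues are reset and unlinked.
 */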
/**
 * assign_control_domain_store - parses the domain ID from @buf and sets
 * the corresponding bit in the mediated matrix device's ADM
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's assign_control_domain attribute
 * @buf: a buffer containing the domain ID to be assigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t assign_control_domain_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int ret;
	unsigned long id;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &id);
	if (ret)
		goto done;

	if (id > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	/* Set the bit in the ADM (bitmask) corresponding to the AP control
	 * domain number (id). The bits in the mask, from most significant to
	 * least significant, correspond to IDs 0 up to one less than the
	 * number of control domains that can be assigned.
	 */
	set_bit_inv(id, matrix_mdev->matrix.adm);
	if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);
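
/*
 * Example (editor's sketch):
 *
 *	echo 0x47 > /sys/devices/vfio_ap/matrix/$UUID/assign_control_domain
 *
 * Control domains carry no queues, so no APQN filtering or queue reset is
 * needed here; only the ADM of the guest's APCB is refreshed.
 */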
/**
 * unassign_control_domain_store - parses the domain ID from @buf and
 * clears the corresponding bit in the mediated matrix device's ADM
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's unassign_control_domain attribute
 * @buf: a buffer containing the domain ID to be unassigned
 * @count: the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t unassign_control_domain_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int ret;
	unsigned long domid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &domid);
	if (ret)
		goto done;

	if (domid > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv(domid, matrix_mdev->matrix.adm);

	if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
		clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);

static ssize_t control_domains_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	unsigned long id;
	int nchars = 0;
	int n;
	char *bufpos = buf;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;

	mutex_lock(&matrix_dev->mdevs_lock);
	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
		n = sprintf(bufpos, "%04lx\n", id);
		bufpos += n;
		nchars += n;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(control_domains);
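
/*
 * Example (editor's sketch): with control domains 6 and 0x47 assigned,
 * reading the attribute yields one "%04lx" ID per line:
 *
 *	$ cat /sys/devices/vfio_ap/matrix/$UUID/control_domains
 *	0006
 *	0047
 */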
static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
	char *bufpos = buf;
	unsigned long apid;
	unsigned long apqi;
	unsigned long apid1;
	unsigned long apqi1;
	unsigned long napm_bits = matrix->apm_max + 1;
	unsigned long naqm_bits = matrix->aqm_max + 1;
	int nchars = 0;
	int n;

	apid1 = find_first_bit_inv(matrix->apm, napm_bits);
	apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);

	if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			for_each_set_bit_inv(apqi, matrix->aqm,
					     naqm_bits) {
				n = sprintf(bufpos, "%02lx.%04lx\n", apid,
					    apqi);
				bufpos += n;
				nchars += n;
			}
		}
	} else if (apid1 < napm_bits) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			n = sprintf(bufpos, "%02lx.\n", apid);
			bufpos += n;
			nchars += n;
		}
	} else if (apqi1 < naqm_bits) {
		for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
			n = sprintf(bufpos, ".%04lx\n", apqi);
			bufpos += n;
			nchars += n;
		}
	}

	return nchars;
}

static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(matrix);

static ssize_t guest_matrix_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(guest_matrix);
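
/*
 * Example (editor's sketch): with adapters 5-6 and domains 4 and 0xab
 * assigned, the matrix attribute reads as the cross product in xx.yyyy APQN
 * notation; an adapter with no domains would print as "05." and a domain
 * with no adapters as ".0004":
 *
 *	$ cat /sys/devices/vfio_ap/matrix/$UUID/matrix
 *	05.0004
 *	05.00ab
 *	06.0004
 *	06.00ab
 */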
static ssize_t write_ap_bitmap(unsigned long *bitmap, char *buf, int offset, char sep)
{
	return sysfs_emit_at(buf, offset, "0x%016lx%016lx%016lx%016lx%c",
			     bitmap[0], bitmap[1], bitmap[2], bitmap[3], sep);
}

static ssize_t ap_config_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	int idx = 0;

	idx += write_ap_bitmap(matrix_mdev->matrix.apm, buf, idx, ',');
	idx += write_ap_bitmap(matrix_mdev->matrix.aqm, buf, idx, ',');
	idx += write_ap_bitmap(matrix_mdev->matrix.adm, buf, idx, '\n');
	return idx;
}

/* Number of characters needed for a complete hex mask representing the bits in .. */
#define AP_DEVICES_STRLEN	(AP_DEVICES / 4 + 3)
#define AP_DOMAINS_STRLEN	(AP_DOMAINS / 4 + 3)
#define AP_CONFIG_STRLEN	(AP_DEVICES_STRLEN + 2 * AP_DOMAINS_STRLEN)
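
/*
 * Worked sizing (editor's note): with AP_DEVICES = AP_DOMAINS = 256 bits,
 * each mask is 256 / 4 = 64 hex digits plus 3 characters for the "0x"
 * prefix and the separator emitted by write_ap_bitmap(), i.e. 67 characters
 * per mask and 3 * 67 = 201 for a complete ap_config line.
 */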
static int parse_bitmap(char **strbufptr, unsigned long *bitmap, int nbits)
{
	char *curmask;

	curmask = strsep(strbufptr, ",\n");
	if (!curmask)
		return -EINVAL;

	bitmap_clear(bitmap, 0, nbits);
	return ap_hex2bitmap(curmask, bitmap, nbits);
}

static int ap_matrix_overflow_check(struct ap_matrix_mdev *matrix_mdev)
{
	unsigned long bit;

	for_each_set_bit_inv(bit, matrix_mdev->matrix.apm, AP_DEVICES) {
		if (bit > matrix_mdev->matrix.apm_max)
			return -ENODEV;
	}

	for_each_set_bit_inv(bit, matrix_mdev->matrix.aqm, AP_DOMAINS) {
		if (bit > matrix_mdev->matrix.aqm_max)
			return -ENODEV;
	}

	for_each_set_bit_inv(bit, matrix_mdev->matrix.adm, AP_DOMAINS) {
		if (bit > matrix_mdev->matrix.adm_max)
			return -ENODEV;
	}

	return 0;
}

static void ap_matrix_copy(struct ap_matrix *dst, struct ap_matrix *src)
{
	/* This check works around false positive gcc -Wstringop-overread */
	if (!src)
		return;

	bitmap_copy(dst->apm, src->apm, AP_DEVICES);
	bitmap_copy(dst->aqm, src->aqm, AP_DOMAINS);
	bitmap_copy(dst->adm, src->adm, AP_DOMAINS);
}

static ssize_t ap_config_store(struct device *dev, struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	struct ap_matrix m_new, m_old, m_added, m_removed;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	unsigned long newbit;
	char *newbuf, *rest;
	int rc = count;
	bool do_update;

	newbuf = kstrndup(buf, AP_CONFIG_STRLEN, GFP_KERNEL);
	if (!newbuf)
		return -ENOMEM;
	rest = newbuf;

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	/* Save old state */
	ap_matrix_copy(&m_old, &matrix_mdev->matrix);
	if (parse_bitmap(&rest, m_new.apm, AP_DEVICES) ||
	    parse_bitmap(&rest, m_new.aqm, AP_DOMAINS) ||
	    parse_bitmap(&rest, m_new.adm, AP_DOMAINS)) {
		rc = -EINVAL;
		goto out;
	}

	bitmap_andnot(m_removed.apm, m_old.apm, m_new.apm, AP_DEVICES);
	bitmap_andnot(m_removed.aqm, m_old.aqm, m_new.aqm, AP_DOMAINS);
	bitmap_andnot(m_added.apm, m_new.apm, m_old.apm, AP_DEVICES);
	bitmap_andnot(m_added.aqm, m_new.aqm, m_old.aqm, AP_DOMAINS);

	/* Need new bitmaps in matrix_mdev for validation */
	ap_matrix_copy(&matrix_mdev->matrix, &m_new);

	/* Ensure new state is valid, else undo new state */
	rc = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (rc) {
		ap_matrix_copy(&matrix_mdev->matrix, &m_old);
		goto out;
	}
	rc = ap_matrix_overflow_check(matrix_mdev);
	if (rc) {
		ap_matrix_copy(&matrix_mdev->matrix, &m_old);
		goto out;
	}
	rc = count;

	/* Need old bitmaps in matrix_mdev for unplug/unlink */
	ap_matrix_copy(&matrix_mdev->matrix, &m_old);

	/* Unlink removed adapters/domains */
	vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, m_removed.apm);
	vfio_ap_mdev_hot_unplug_domains(matrix_mdev, m_removed.aqm);

	/* Need new bitmaps in matrix_mdev for linking new adapters/domains */
	ap_matrix_copy(&matrix_mdev->matrix, &m_new);

	/* Link newly added adapters */
	for_each_set_bit_inv(newbit, m_added.apm, AP_DEVICES)
		vfio_ap_mdev_link_adapter(matrix_mdev, newbit);

	for_each_set_bit_inv(newbit, m_added.aqm, AP_DOMAINS)
		vfio_ap_mdev_link_domain(matrix_mdev, newbit);

	/* filter resources not bound to vfio-ap */
	do_update = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
	do_update |= vfio_ap_mdev_filter_cdoms(matrix_mdev);

	/* Apply changes to shadow apcb if things changed */
	if (do_update) {
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		reset_queues_for_apids(matrix_mdev, apm_filtered);
	}
out:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);
	kfree(newbuf);
	return rc;
}
static DEVICE_ATTR_RW(ap_config);
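
/*
 * Example (editor's sketch): ap_config accepts all three masks in one write,
 * in the same "APM,AQM,ADM" format ap_config_show() emits, each mask being
 * "0x" plus 64 hex digits. E.g., assigning only adapter 0, domain 0 and
 * control domain 0 (bit 0 is the leftmost bit of each mask):
 *
 *	echo 0x8000...,0x8000...,0x8000... > ap_config
 *
 * (digits elided here for brevity). A parse or validation failure leaves the
 * previous assignment untouched.
 */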
static struct attribute *vfio_ap_mdev_attrs[] = {
	&dev_attr_assign_adapter.attr,
	&dev_attr_unassign_adapter.attr,
	&dev_attr_assign_domain.attr,
	&dev_attr_unassign_domain.attr,
	&dev_attr_assign_control_domain.attr,
	&dev_attr_unassign_control_domain.attr,
	&dev_attr_ap_config.attr,
	&dev_attr_control_domains.attr,
	&dev_attr_matrix.attr,
	&dev_attr_guest_matrix.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_attr_group = {
	.attrs = vfio_ap_mdev_attrs
};

static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
	&vfio_ap_mdev_attr_group,
	NULL
};

/**
 * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
 * to manage AP resources for the guest whose state is represented by @kvm
 *
 * @matrix_mdev: a mediated matrix device
 * @kvm: reference to KVM instance
 *
 * Return: 0 if no other mediated matrix device has a reference to @kvm;
 * otherwise, returns -EPERM.
 */
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
				struct kvm *kvm)
{
	struct ap_matrix_mdev *m;

	if (kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
			if (m != matrix_mdev && m->kvm == kvm) {
				release_update_locks_for_kvm(kvm);
				return -EPERM;
			}
		}

		kvm_get_kvm(kvm);
		matrix_mdev->kvm = kvm;
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

		release_update_locks_for_kvm(kvm);
	}

	return 0;
}
static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length)
{
	struct ap_queue_table *qtable = &matrix_mdev->qtable;
	struct vfio_ap_queue *q;
	int loop_cursor;

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		if (q->saved_iova >= iova && q->saved_iova < iova + length)
			vfio_ap_irq_disable(q);
	}
}

static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
				   u64 length)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	mutex_lock(&matrix_dev->mdevs_lock);
	unmap_iova(matrix_mdev, iova, length);
	mutex_unlock(&matrix_dev->mdevs_lock);
}

/**
 * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
 * by @matrix_mdev.
 *
 * @matrix_mdev: a matrix mediated device
 */
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
	struct kvm *kvm = matrix_mdev->kvm;

	if (kvm && kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = NULL;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		kvm_arch_crypto_clear_masks(kvm);
		vfio_ap_mdev_reset_queues(matrix_mdev);
		kvm_put_kvm(kvm);
		matrix_mdev->kvm = NULL;

		release_update_locks_for_kvm(kvm);
	}
}

static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
	struct ap_queue *queue;
	struct vfio_ap_queue *q = NULL;

	queue = ap_get_qdev(apqn);
	if (!queue)
		return NULL;

	if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
		q = dev_get_drvdata(&queue->ap_dev.device);

	put_device(&queue->ap_dev.device);

	return q;
}

static int apq_status_check(int apqn, struct ap_queue_status *status)
{
	switch (status->response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
		return 0;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
		return -EBUSY;
	case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE:
	case AP_RESPONSE_ASSOC_FAILED:
		/*
		 * These asynchronous response codes indicate a PQAP(AAPQ)
		 * instruction to associate a secret with the guest failed. All
		 * subsequent AP instructions will end with the asynchronous
		 * response code until the AP queue is reset; so, let's return
		 * a value indicating a reset needs to be performed again.
		 */
		return -EAGAIN;
	default:
		WARN(true,
		     "failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n",
		     AP_QID_CARD(apqn), AP_QID_QUEUE(apqn),
		     status->response_code);
		return -EIO;
	}
}
#define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)"

static void apq_reset_check(struct work_struct *reset_work)
{
	int ret = -EBUSY, elapsed = 0;
	struct ap_queue_status status;
	struct vfio_ap_queue *q;

	q = container_of(reset_work, struct vfio_ap_queue, reset_work);
	memcpy(&status, &q->reset_status, sizeof(status));
	while (true) {
		msleep(AP_RESET_INTERVAL);
		elapsed += AP_RESET_INTERVAL;
		status = ap_tapq(q->apqn, NULL);
		ret = apq_status_check(q->apqn, &status);
		if (ret == -EIO)
			return;
		if (ret == -EBUSY) {
			pr_notice_ratelimited(WAIT_MSG, elapsed,
					      AP_QID_CARD(q->apqn),
					      AP_QID_QUEUE(q->apqn),
					      status.response_code,
					      status.queue_empty,
					      status.irq_enabled);
		} else {
			if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS ||
			    q->reset_status.response_code == AP_RESPONSE_BUSY ||
			    q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS ||
			    ret == -EAGAIN) {
				status = ap_zapq(q->apqn, 0);
				memcpy(&q->reset_status, &status, sizeof(status));
				continue;
			}
			if (q->saved_isc != VFIO_AP_ISC_INVALID)
				vfio_ap_free_aqic_resources(q);
			break;
		}
	}
}

static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
{
	struct ap_queue_status status;

	if (!q)
		return;
	status = ap_zapq(q->apqn, 0);
	memcpy(&q->reset_status, &status, sizeof(status));
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
		/*
		 * Let's verify whether the ZAPQ completed successfully on a
		 * work queue.
		 */
		queue_work(system_long_wq, &q->reset_work);
		break;
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
		vfio_ap_free_aqic_resources(q);
		break;
	default:
		WARN(true,
		     "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
		     AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
		     status.response_code);
	}
}
static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
{
	int ret = 0, loop_cursor;
	struct vfio_ap_queue *q;

	hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode)
		vfio_ap_mdev_reset_queue(q);

	hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) {
		flush_work(&q->reset_work);

		if (q->reset_status.response_code)
			ret = -EIO;
	}

	return ret;
}

static int vfio_ap_mdev_reset_qlist(struct list_head *qlist)
{
	int ret = 0;
	struct vfio_ap_queue *q;

	list_for_each_entry(q, qlist, reset_qnode)
		vfio_ap_mdev_reset_queue(q);

	list_for_each_entry(q, qlist, reset_qnode) {
		flush_work(&q->reset_work);

		if (q->reset_status.response_code)
			ret = -EIO;
	}

	return ret;
}
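
/*
 * Editor's note on the two-pass pattern above: every ZAPQ is issued first so
 * that the per-queue verification work items poll in parallel, and only then
 * is each reset_work flushed; the total wait is thus bounded by roughly the
 * slowest queue rather than the sum over all queues.
 */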
static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	if (!vdev->kvm)
		return -EINVAL;

	return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}

static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	vfio_ap_mdev_unset_kvm(matrix_mdev);
}

static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
{
	struct device *dev = vdev->dev;
	struct ap_matrix_mdev *matrix_mdev;

	matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);

	if (matrix_mdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(dev,
					       "Relaying device request to user (#%u)\n",
					       count);

		eventfd_signal(matrix_mdev->req_trigger);
	} else if (count == 0) {
		dev_notice(dev,
			   "No device request registered, blocked until released by user\n");
	}
}

static int vfio_ap_mdev_get_device_info(unsigned long arg)
{
	unsigned long minsz;
	struct vfio_device_info info;

	minsz = offsetofend(struct vfio_device_info, num_irqs);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
	info.num_regions = 0;
	info.num_irqs = VFIO_AP_NUM_IRQS;

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
static ssize_t vfio_ap_get_irq_info(unsigned long arg)
{
	unsigned long minsz;
	struct vfio_irq_info info;

	minsz = offsetofend(struct vfio_irq_info, count);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS)
		return -EINVAL;

	switch (info.index) {
	case VFIO_AP_REQ_IRQ_INDEX:
		info.count = 1;
		info.flags = VFIO_IRQ_INFO_EVENTFD;
		break;
	default:
		return -EINVAL;
	}

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg)
{
	int ret;
	size_t data_size;
	unsigned long minsz;

	minsz = offsetofend(struct vfio_irq_set, count);

	if (copy_from_user(irq_set, (void __user *)arg, minsz))
		return -EFAULT;

	ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS,
						 &data_size);
	if (ret)
		return ret;

	if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	return 0;
}

static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
				   unsigned long arg)
{
	s32 fd;
	void __user *data;
	unsigned long minsz;
	struct eventfd_ctx *req_trigger;

	minsz = offsetofend(struct vfio_irq_set, count);
	data = (void __user *)(arg + minsz);

	if (get_user(fd, (s32 __user *)data))
		return -EFAULT;

	if (fd == -1) {
		if (matrix_mdev->req_trigger)
			eventfd_ctx_put(matrix_mdev->req_trigger);
		matrix_mdev->req_trigger = NULL;
	} else if (fd >= 0) {
		req_trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(req_trigger))
			return PTR_ERR(req_trigger);

		if (matrix_mdev->req_trigger)
			eventfd_ctx_put(matrix_mdev->req_trigger);

		matrix_mdev->req_trigger = req_trigger;
	} else {
		return -EINVAL;
	}

	return 0;
}

static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
			    unsigned long arg)
{
	int ret;
	struct vfio_irq_set irq_set;

	ret = vfio_ap_irq_set_init(&irq_set, arg);
	if (ret)
		return ret;

	switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_EVENTFD:
		switch (irq_set.index) {
		case VFIO_AP_REQ_IRQ_INDEX:
			return vfio_ap_set_request_irq(matrix_mdev, arg);
		default:
			return -EINVAL;
		}
	default:
		return -EINVAL;
	}
}
static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
				  unsigned int cmd, unsigned long arg)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);
	int ret;

	mutex_lock(&matrix_dev->mdevs_lock);
	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		ret = vfio_ap_mdev_get_device_info(arg);
		break;
	case VFIO_DEVICE_RESET:
		ret = vfio_ap_mdev_reset_queues(matrix_mdev);
		break;
	case VFIO_DEVICE_GET_IRQ_INFO:
		ret = vfio_ap_get_irq_info(arg);
		break;
	case VFIO_DEVICE_SET_IRQS:
		ret = vfio_ap_set_irqs(matrix_mdev, arg);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return ret;
}

static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
{
	struct ap_matrix_mdev *matrix_mdev;
	unsigned long apid = AP_QID_CARD(q->apqn);
	unsigned long apqi = AP_QID_QUEUE(q->apqn);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
		    test_bit_inv(apqi, matrix_mdev->matrix.aqm))
			return matrix_mdev;
	}

	return NULL;
}
static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars = 0;
	struct vfio_ap_queue *q;
	unsigned long apid, apqi;
	struct ap_matrix_mdev *matrix_mdev;
	struct ap_device *apdev = to_ap_dev(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	q = dev_get_drvdata(&apdev->device);
	matrix_mdev = vfio_ap_mdev_for_queue(q);

	/* If the queue is assigned to the matrix mediated device, then
	 * determine whether it is passed through to a guest; otherwise,
	 * indicate that it is unassigned.
	 */
	if (matrix_mdev) {
		apid = AP_QID_CARD(q->apqn);
		apqi = AP_QID_QUEUE(q->apqn);
		/*
		 * If the queue is passed through to the guest, then indicate
		 * that it is in use; otherwise, indicate that it is
		 * merely assigned to a matrix mediated device.
		 */
		if (matrix_mdev->kvm &&
		    test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
					   AP_QUEUE_IN_USE);
		else
			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
					   AP_QUEUE_ASSIGNED);
	} else {
		nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
				   AP_QUEUE_UNASSIGNED);
	}

	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(status);
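
/*
 * Example (editor's sketch): status is an attribute of the queue device on
 * the AP bus rather than of the mdev, e.g.
 *
 *	$ cat /sys/devices/ap/card05/05.0004/status
 *	in_use
 *
 * "in_use" means passed through to a running guest, "assigned" merely bound
 * to an mdev's matrix, and "unassigned" neither.
 */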
static struct attribute *vfio_queue_attrs[] = {
	&dev_attr_status.attr,
	NULL,
};

static const struct attribute_group vfio_queue_attr_group = {
	.attrs = vfio_queue_attrs,
};

static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
	.init = vfio_ap_mdev_init_dev,
	.open_device = vfio_ap_mdev_open_device,
	.close_device = vfio_ap_mdev_close_device,
	.ioctl = vfio_ap_mdev_ioctl,
	.dma_unmap = vfio_ap_mdev_dma_unmap,
	.bind_iommufd = vfio_iommufd_emulated_bind,
	.unbind_iommufd = vfio_iommufd_emulated_unbind,
	.attach_ioas = vfio_iommufd_emulated_attach_ioas,
	.detach_ioas = vfio_iommufd_emulated_detach_ioas,
	.request = vfio_ap_mdev_request
};

static struct mdev_driver vfio_ap_matrix_driver = {
	.device_api = VFIO_DEVICE_API_AP_STRING,
	.max_instances = MAX_ZDEV_ENTRIES_EXT,
	.driver = {
		.name = "vfio_ap_mdev",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = vfio_ap_mdev_attr_groups,
	},
	.probe = vfio_ap_mdev_probe,
	.remove = vfio_ap_mdev_remove,
};

int vfio_ap_mdev_register(void)
{
	int ret;

	ret = mdev_register_driver(&vfio_ap_matrix_driver);
	if (ret)
		return ret;

	matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
	matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
	matrix_dev->mdev_types[0] = &matrix_dev->mdev_type;
	ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
				   &vfio_ap_matrix_driver,
				   matrix_dev->mdev_types, 1);
	if (ret)
		goto err_driver;
	return 0;

err_driver:
	mdev_unregister_driver(&vfio_ap_matrix_driver);
	return ret;
}

void vfio_ap_mdev_unregister(void)
{
	mdev_unregister_parent(&matrix_dev->parent);
	mdev_unregister_driver(&vfio_ap_matrix_driver);
}
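
/*
 * Example (editor's sketch): once the parent is registered, an mdev instance
 * is created through the standard mdev sysfs interface, e.g.
 *
 *	uuidgen > /sys/devices/vfio_ap/matrix/mdev_supported_types/\
 *	vfio_ap-passthrough/create
 *
 * which triggers vfio_ap_mdev_probe() and exposes the attributes defined in
 * vfio_ap_mdev_attrs[] under the new device's sysfs directory.
 */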
int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
{
	int ret;
	struct vfio_ap_queue *q;
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev;

	ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
	if (ret)
		return ret;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q) {
		ret = -ENOMEM;
		goto err_remove_group;
	}

	q->apqn = to_ap_queue(&apdev->device)->qid;
	q->saved_isc = VFIO_AP_ISC_INVALID;
	memset(&q->reset_status, 0, sizeof(q->reset_status));
	INIT_WORK(&q->reset_work, apq_reset_check);
	matrix_mdev = get_update_locks_by_apqn(q->apqn);

	if (matrix_mdev) {
		vfio_ap_mdev_link_queue(matrix_mdev, q);

		/*
		 * If we're in the process of handling the adding of adapters or
		 * domains to the host's AP configuration, then let the
		 * vfio_ap device driver's on_scan_complete callback filter the
		 * matrix and update the guest's AP configuration after all of
		 * the new queue devices are probed.
		 */
		if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
		    !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
			goto done;

		if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
			reset_queues_for_apids(matrix_mdev, apm_filtered);
		}
	}

done:
	dev_set_drvdata(&apdev->device, q);
	release_update_locks_for_mdev(matrix_mdev);

	return ret;

err_remove_group:
	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
	return ret;
}
void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
{
	unsigned long apid, apqi;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev;

	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
	q = dev_get_drvdata(&apdev->device);
	get_update_locks_for_queue(q);
	matrix_mdev = q->matrix_mdev;
	apid = AP_QID_CARD(q->apqn);
	apqi = AP_QID_QUEUE(q->apqn);

	if (matrix_mdev) {
		/* If the queue is assigned to the guest's AP configuration */
		if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
			/*
			 * Since the queues are defined via a matrix of adapters
			 * and domains, it is not possible to hot unplug a
			 * single queue; so, let's unplug the adapter.
			 */
			clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
			reset_queues_for_apid(matrix_mdev, apid);
			goto done;
		}
	}

	/*
	 * If the queue is not in the host's AP configuration, then resetting
	 * it will fail with response code 01 (APQN not valid); so, let's make
	 * sure it is in the host's config.
	 */
	if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) &&
	    test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) {
		vfio_ap_mdev_reset_queue(q);
		flush_work(&q->reset_work);
	}

done:
	if (matrix_mdev)
		vfio_ap_unlink_queue_fr_mdev(q);

	dev_set_drvdata(&apdev->device, NULL);
	kfree(q);
	release_update_locks_for_mdev(matrix_mdev);
}
/**
 * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
 *				 assigned to a mediated device under the control
 *				 of the vfio_ap device driver.
 *
 * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
 * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
 *
 * Return:
 *	* -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
 *	  assigned to a mediated device under the control of the vfio_ap
 *	  device driver.
 *	* Otherwise, return 0.
 */
int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
{
	int ret;

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);

	return ret;
}
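
/*
 * Example (editor's sketch): the AP bus reaches this function through its
 * in_use callback when an administrator tries to hand queues back to the
 * default drivers, e.g.
 *
 *	echo +5 > /sys/bus/ap/apmask
 *
 * and the write fails if any 05.xxxx APQN is still assigned to an mdev
 * (the -EADDRINUSE above surfaces as the write error).
 */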
/**
 * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
 *				 domains that have been removed from the host's
 *				 AP configuration from a guest.
 *
 * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
 * @aprem: the adapters that have been removed from the host's AP configuration
 * @aqrem: the domains that have been removed from the host's AP configuration
 * @cdrem: the control domains that have been removed from the host's AP
 *	   configuration.
 */
static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
					unsigned long *aprem,
					unsigned long *aqrem,
					unsigned long *cdrem)
{
	int do_hotplug = 0;

	if (!bitmap_empty(aprem, AP_DEVICES)) {
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
					    matrix_mdev->shadow_apcb.apm,
					    aprem, AP_DEVICES);
	}

	if (!bitmap_empty(aqrem, AP_DOMAINS)) {
		/* The AQM is a domain mask, so size it by AP_DOMAINS */
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
					    matrix_mdev->shadow_apcb.aqm,
					    aqrem, AP_DOMAINS);
	}

	if (!bitmap_empty(cdrem, AP_DOMAINS))
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
					    matrix_mdev->shadow_apcb.adm,
					    cdrem, AP_DOMAINS);

	if (do_hotplug)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}
/**
 * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
 *			     domains and control domains that have been removed
 *			     from the host AP configuration and unplugs them
 *			     from those guests.
 *
 * @ap_remove: bitmap specifying which adapters have been removed from the host
 *	       config.
 * @aq_remove: bitmap specifying which domains have been removed from the host
 *	       config.
 * @cd_remove: bitmap specifying which control domains have been removed from
 *	       the host config.
 */
static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
				    unsigned long *aq_remove,
				    unsigned long *cd_remove)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(aprem, AP_DEVICES);
	DECLARE_BITMAP(aqrem, AP_DOMAINS);
	DECLARE_BITMAP(cdrem, AP_DOMAINS);
	int do_remove = 0;

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		mutex_lock(&matrix_mdev->kvm->lock);
		mutex_lock(&matrix_dev->mdevs_lock);

		do_remove |= bitmap_and(aprem, ap_remove,
					matrix_mdev->matrix.apm,
					AP_DEVICES);
		do_remove |= bitmap_and(aqrem, aq_remove,
					matrix_mdev->matrix.aqm,
					AP_DOMAINS);
		/*
		 * Like the two masks above, cdrem is the intersection of the
		 * removed IDs with this mdev's assignment, so bitmap_and.
		 */
		do_remove |= bitmap_and(cdrem, cd_remove,
					matrix_mdev->matrix.adm,
					AP_DOMAINS);

		if (do_remove)
			vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
						    cdrem);

		mutex_unlock(&matrix_dev->mdevs_lock);
		mutex_unlock(&matrix_mdev->kvm->lock);
	}
}
/**
 * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
 *				control domains from the host AP configuration
 *				by unplugging them from the guests that are
 *				using them.
 * @cur_config_info: the current host AP configuration information
 * @prev_config_info: the previous host AP configuration information
 */
static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
				       struct ap_config_info *prev_config_info)
{
	int do_remove;
	DECLARE_BITMAP(aprem, AP_DEVICES);
	DECLARE_BITMAP(aqrem, AP_DOMAINS);
	DECLARE_BITMAP(cdrem, AP_DOMAINS);

	do_remove = bitmap_andnot(aprem,
				  (unsigned long *)prev_config_info->apm,
				  (unsigned long *)cur_config_info->apm,
				  AP_DEVICES);
	do_remove |= bitmap_andnot(aqrem,
				   (unsigned long *)prev_config_info->aqm,
				   (unsigned long *)cur_config_info->aqm,
				   AP_DOMAINS);
	do_remove |= bitmap_andnot(cdrem,
				   (unsigned long *)prev_config_info->adm,
				   (unsigned long *)cur_config_info->adm,
				   AP_DOMAINS);

	if (do_remove)
		vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
}
/**
 * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that
 *				 are older than AP type 10 (CEX4).
 * @apm: a bitmap of the APIDs to examine
 * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
 */
static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
{
	bool apid_cleared;
	struct ap_queue_status status;
	unsigned long apid, apqi;
	struct ap_tapq_hwinfo info;

	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
		apid_cleared = false;

		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
			status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
			switch (status.response_code) {
			/*
			 * According to the architecture in each case
			 * below, the queue's info should be filled.
			 */
			case AP_RESPONSE_NORMAL:
			case AP_RESPONSE_RESET_IN_PROGRESS:
			case AP_RESPONSE_DECONFIGURED:
			case AP_RESPONSE_CHECKSTOPPED:
			case AP_RESPONSE_BUSY:
				/*
				 * The vfio_ap device driver only
				 * supports CEX4 and newer adapters, so
				 * remove the APID if the adapter is
				 * older than a CEX4.
				 */
				if (info.at < AP_DEVICE_TYPE_CEX4) {
					clear_bit_inv(apid, apm);
					apid_cleared = true;
				}
				break;
			default:
				/*
				 * If we don't know the adapter type,
				 * clear its APID since it can't be
				 * determined whether the vfio_ap
				 * device driver supports it.
				 */
				clear_bit_inv(apid, apm);
				apid_cleared = true;
				break;
			}

			/*
			 * If we've already cleared the APID from the apm, there
			 * is no need to continue examining the remaining AP
			 * queues to determine the type of the adapter.
			 */
			if (apid_cleared)
				break;
		}
	}
}
/**
 * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
 *			  control domains that have been added to the host's
 *			  AP configuration for each matrix mdev to which they
 *			  are assigned.
 *
 * @apm_add: a bitmap specifying the adapters that have been added to the AP
 *	     configuration.
 * @aqm_add: a bitmap specifying the domains that have been added to the AP
 *	     configuration.
 * @adm_add: a bitmap specifying the control domains that have been added to the
 *	     AP configuration.
 */
static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
				 unsigned long *adm_add)
{
	struct ap_matrix_mdev *matrix_mdev;

	if (list_empty(&matrix_dev->mdev_list))
		return;

	vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		bitmap_and(matrix_mdev->apm_add,
			   matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
		bitmap_and(matrix_mdev->aqm_add,
			   matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
		/* The ADM is a domain mask, so size it by AP_DOMAINS */
		bitmap_and(matrix_mdev->adm_add,
			   matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
	}
}
/**
 * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
 *			     control domains to the host AP configuration
 *			     by updating the bitmaps that specify what adapters,
 *			     domains and control domains have been added so they
 *			     can be hot plugged into the guest when the AP bus
 *			     scan completes (see vfio_ap_on_scan_complete
 *			     function).
 * @cur_config_info: the current AP configuration information
 * @prev_config_info: the previous AP configuration information
 */
static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
				    struct ap_config_info *prev_config_info)
{
	bool do_add;
	DECLARE_BITMAP(apm_add, AP_DEVICES);
	DECLARE_BITMAP(aqm_add, AP_DOMAINS);
	DECLARE_BITMAP(adm_add, AP_DOMAINS);

	do_add = bitmap_andnot(apm_add,
			       (unsigned long *)cur_config_info->apm,
			       (unsigned long *)prev_config_info->apm,
			       AP_DEVICES);
	do_add |= bitmap_andnot(aqm_add,
				(unsigned long *)cur_config_info->aqm,
				(unsigned long *)prev_config_info->aqm,
				AP_DOMAINS);
	do_add |= bitmap_andnot(adm_add,
				(unsigned long *)cur_config_info->adm,
				(unsigned long *)prev_config_info->adm,
				AP_DOMAINS);

	if (do_add)
		vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
}
/**
 * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
 *			    configuration.
 *
 * @cur_cfg_info: the current host AP configuration
 * @prev_cfg_info: the previous host AP configuration
 */
void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
			    struct ap_config_info *prev_cfg_info)
{
	if (!cur_cfg_info || !prev_cfg_info)
		return;

	mutex_lock(&matrix_dev->guests_lock);

	vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
	vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
	memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));

	mutex_unlock(&matrix_dev->guests_lock);
}

static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
{
	DECLARE_BITMAP(apm_filtered, AP_DEVICES);
	bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false;

	/* Stays empty unless vfio_ap_mdev_filter_matrix() populates it below */
	bitmap_zero(apm_filtered, AP_DEVICES);

	mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);

	filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm,
					    matrix_mdev->apm_add, AP_DEVICES);
	filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm,
					   matrix_mdev->aqm_add, AP_DOMAINS);
	filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm,
					 matrix_mdev->adm_add, AP_DOMAINS);

	if (filter_adapters || filter_domains)
		do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);

	if (filter_cdoms)
		do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);

	if (do_hotplug)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	reset_queues_for_apids(matrix_mdev, apm_filtered);

	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_mdev->kvm->lock);
}

void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
			      struct ap_config_info *old_config_info)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
		    bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
		    bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
			continue;

		vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
		bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
		bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
		bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
	}

	mutex_unlock(&matrix_dev->guests_lock);
}