core.c 91 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
7327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * drivers/base/core.c - core driver model code (device registration, etc)
  4. *
  5. * Copyright (c) 2002-3 Patrick Mochel
  6. * Copyright (c) 2002-3 Open Source Development Labs
  7. * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
  8. * Copyright (c) 2006 Novell, Inc.
  9. */
  10. #include <linux/cpufreq.h>
  11. #include <linux/device.h>
  12. #include <linux/err.h>
  13. #include <linux/fwnode.h>
  14. #include <linux/init.h>
  15. #include <linux/module.h>
  16. #include <linux/slab.h>
  17. #include <linux/string.h>
  18. #include <linux/kdev_t.h>
  19. #include <linux/notifier.h>
  20. #include <linux/of.h>
  21. #include <linux/of_device.h>
  22. #include <linux/genhd.h>
  23. #include <linux/mutex.h>
  24. #include <linux/pm_runtime.h>
  25. #include <linux/netdevice.h>
  26. #include <linux/sched/signal.h>
  27. #include <linux/sysfs.h>
  28. #include "base.h"
  29. #include "power/power.h"
#ifdef CONFIG_SYSFS_DEPRECATED
#ifdef CONFIG_SYSFS_DEPRECATED_V2
/* Default to the deprecated sysfs layout when V2 compatibility is selected. */
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/* Parse the "sysfs.deprecated=" boot parameter into sysfs_deprecated. */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
/* Device links support. */

#ifdef CONFIG_SRCU
/*
 * Writers to the device-links graph serialize on a mutex; readers use
 * SRCU read-side critical sections so that link deletion can be deferred
 * past any concurrent readers (see __device_link_del()).
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

/* Returns the SRCU index to be passed back to device_links_read_unlock(). */
int device_links_read_lock(void)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx)
{
	srcu_read_unlock(&device_links_srcu, idx);
}
#else /* !CONFIG_SRCU */
/* Without SRCU, fall back to an rwsem; the read-lock index is unused. */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}
#endif /* !CONFIG_SRCU */
  82. static bool device_is_ancestor(struct device *dev, struct device *target)
  83. {
  84. while (target->parent) {
  85. target = target->parent;
  86. if (dev == target)
  87. return true;
  88. }
  89. return false;
  90. }
/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc). Return 1 if that is the case or 0 otherwise.
 */
static int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	/*
	 * The "ancestors" check is needed to catch the case when the target
	 * device has not been completely initialized yet and it is still
	 * missing from the list of children of its parent device.
	 */
	if (dev == target || device_is_ancestor(dev, target))
		return 1;

	/* Recurse into the children of @dev (reusing this function as the callback). */
	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	/* Recurse into the consumers of @dev. */
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}
/*
 * device_link_init_status - Set the initial state of a managed device link.
 *
 * The link state is derived from the current driver state of the supplier
 * (outer switch) combined with that of the consumer (inner switches).
 */
static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from the
			 * supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}
/*
 * device_reorder_to_tail - Move @dev, then its children and consumers, to the
 * tails of the devices_kset and dpm_list (depth-first recursion).
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	/* Everything depending on @dev must end up behind it in the lists. */
	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node)
		device_reorder_to_tail(link->consumer, NULL);

	return 0;
}
  181. /**
  182. * device_pm_move_to_tail - Move set of devices to the end of device lists
  183. * @dev: Device to move
  184. *
  185. * This is a device_reorder_to_tail() wrapper taking the requisite locks.
  186. *
  187. * It moves the @dev along with all of its children and all of its consumers
  188. * to the ends of the device_kset and dpm_list, recursively.
  189. */
  190. void device_pm_move_to_tail(struct device *dev)
  191. {
  192. int idx;
  193. idx = device_links_read_lock();
  194. device_pm_lock();
  195. device_reorder_to_tail(dev, NULL);
  196. device_pm_unlock();
  197. device_links_read_unlock(idx);
  198. }
  199. #define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
  200. DL_FLAG_AUTOREMOVE_SUPPLIER | \
  201. DL_FLAG_AUTOPROBE_CONSUMER)
  202. #define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
  203. DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account. Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
 * be forced into the active metastate and reference-counted upon the creation
 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
 * expected to release the link returned by it directly with the help of either
 * device_link_del() or device_link_remove().
 *
 * If that flag is not set, however, the caller of this function is handing the
 * management of the link over to the driver core entirely and its return value
 * can only be used to check whether or not the link is present. In that case,
 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
 * flags can be used to indicate to the driver core when the link can be safely
 * deleted. Namely, setting one of them in @flags indicates to the driver core
 * that the link is not going to be used (by the given caller of this function)
 * after unbinding the consumer or supplier driver, respectively, from its
 * device, so the link can be deleted at that point. If none of them is set,
 * the link will be maintained until one of the devices pointed to by it (either
 * the consumer or the supplier) is unregistered.
 *
 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
 * be used to request the driver core to automatically probe for a consumer
 * driver after successfully binding a driver to the supplier device.
 *
 * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
 * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
 * the same time is invalid and will cause NULL to be returned upfront.
 * However, if a device link between the given @consumer and @supplier pair
 * exists already when this function is called for them, the existing link will
 * be returned regardless of its current type and status (the link's flags may
 * be modified then). The caller of this function is then expected to treat
 * the link as though it has just been created, so (in particular) if
 * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
 * explicitly when not needed any more (as stated above).
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
 * on it to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 *
 * The supplier device is required to be registered when this function is called
 * and NULL will be returned if that is not the case. The consumer device need
 * not be registered, however.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/* Reject invalid or mutually exclusive flag combinations upfront. */
	if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	/* Resume and reference the supplier before taking any locks. */
	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse dependency between the consumer and the supplier already in
	 * the graph, return NULL.
	 */
	if (!device_pm_initialized(supplier)
	    || device_is_dependent(consumer, supplier)) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
	 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	/* Reuse an existing link between this pair if there is one. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			link->flags |= DL_FLAG_STATELESS;
			kref_get(&link->kref);
			goto out;
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		/* A previously stateless link is being claimed as managed. */
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* The link holds a reference on each of its two endpoints. */
	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe to
	 * resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	/* Balance the earlier pm_runtime_get_sync() if no link was created. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
/* Release the PM-runtime and device references held by @link and free it. */
static void device_link_free(struct device_link *link)
{
	/* Drop all but the final rpm_active reference on the supplier. */
	while (refcount_dec_not_one(&link->rpm_active))
		pm_runtime_put(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}
#ifdef CONFIG_SRCU
/* SRCU callback: free the link once all read-side sections have completed. */
static void __device_link_free_srcu(struct rcu_head *rhead)
{
	device_link_free(container_of(rhead, struct device_link, rcu_head));
}

/*
 * kref release: unlink @link from both endpoints and defer freeing it
 * until after an SRCU grace period, so concurrent readers stay safe.
 */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_info(link->consumer, "Dropping the link to %s\n",
		 dev_name(link->supplier));

	if (link->flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_drop_link(link->consumer);

	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
}
#else /* !CONFIG_SRCU */
/* kref release: unlink @link and free it immediately (no SRCU readers). */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_info(link->consumer, "Dropping the link to %s\n",
		 dev_name(link->supplier));

	if (link->flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_drop_link(link->consumer);

	list_del(&link->s_node);
	list_del(&link->c_node);
	device_link_free(link);
}
#endif /* !CONFIG_SRCU */
  419. static void device_link_put_kref(struct device_link *link)
  420. {
  421. if (link->flags & DL_FLAG_STATELESS)
  422. kref_put(&link->kref, __device_link_del);
  423. else
  424. WARN(1, "Unable to drop a managed device link reference\n");
  425. }
/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM. If the link was added multiple times, it needs to be deleted as often.
 * Care is required for hotplugged devices: Their links are purged on removal
 * and calling device_link_del() is then no longer allowed.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_pm_lock();
	device_link_put_kref(link);
	device_pm_unlock();
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
/**
 * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
	struct device_link *link;

	if (WARN_ON(consumer == supplier))
		return;

	device_links_write_lock();
	device_pm_lock();

	/* Find the (single) link from @supplier to @consumer and drop it. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer == consumer) {
			device_link_put_kref(link);
			break;
		}
	}

	device_pm_unlock();
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);
  469. static void device_links_missing_supplier(struct device *dev)
  470. {
  471. struct device_link *link;
  472. list_for_each_entry(link, &dev->links.suppliers, c_node)
  473. if (link->status == DL_STATE_CONSUMER_PROBE)
  474. WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
  475. }
/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers. Walk the list of the device's
 * links to suppliers and see if all of them are available. If not, simply
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here. It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers. This means we need to
 * mark the link as "consumer probe in progress" to make the supplier removal
 * wait for us to complete (or bad things may happen).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/* Defer the probe if any managed supplier is not available. */
		if (link->status != DL_STATE_AVAILABLE) {
			device_links_missing_supplier(dev);
			ret = -EPROBE_DEFER;
			break;
		}
		/* Pin the supplier: its removal now waits for this probe. */
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}
/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active".
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the "consumer
		 * probe" state to start with if the supplier is still probing
		 * when they are created and they may become "active" if the
		 * consumer probe returns first.  Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		/* The supplier is bound; queue consumers that asked for it. */
		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/* device_links_check_suppliers() marked these before probe. */
		WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
		WRITE_ONCE(link->status, DL_STATE_ACTIVE);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();
}
/*
 * Stop managing @link: clear DL_FLAG_MANAGED and drop the reference the
 * device-links core holds on managed links.  The status is reset with
 * WRITE_ONCE before the reference is dropped, so the ordering of these
 * three statements is deliberate - do not reorder.
 */
static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}
/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/* Non-persistent links are dropped outright. */
		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
			device_link_drop_managed(link);
		else if (link->status == DL_STATE_CONSUMER_PROBE ||
			 link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}
/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver has just failed to probe.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added by
		 * a probing consumer while this device was still probing.
		 * Change its state to "dormant", as it represents a valid
		 * relationship, but it is not functionally meaningful.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	/* Supplier-side links are handled by the shared helper. */
	__device_links_no_driver(dev);

	device_links_write_unlock();
}
/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant" and
 * invoke %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
	struct device_link *link, *ln;

	device_links_write_lock();

	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

		/*
		 * autoremove the links between this @dev and its consumer
		 * devices that are not active, i.e. where the link state
		 * has moved to DL_STATE_SUPPLIER_UNBIND.
		 */
		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
			device_link_drop_managed(link);

		/*
		 * NOTE(review): this also overwrites the DL_STATE_NONE that
		 * device_link_drop_managed() just stored on a dropped link -
		 * confirm that is intentional before restructuring.
		 */
		WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}
/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's status
 * is one of "consumer probe" or "active" (meaning that the given consumer is
 * probing right now or its driver is present).  Otherwise, change the link
 * state to "supplier unbind" to prevent the consumer from being probed
 * successfully going forward.
 *
 * Return 'false' if there are no probing or active consumers.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
	struct device_link *link;
	bool ret = false;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status == DL_STATE_CONSUMER_PROBE
		    || link->status == DL_STATE_ACTIVE) {
			ret = true;
			break;
		}
		/* Block future probes of this idle consumer. */
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
	}

	/* Marked even when busy: the supplier is on its way out. */
	dev->links.status = DL_DEV_UNBINDING;

	device_links_write_unlock();
	return ret;
}
/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state.  If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as we have
 * changed the state of the link already).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			/* Cannot unbind mid-probe; drop the lock and wait. */
			device_links_write_unlock();

			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			/* Hold the consumer across the unlocked unbind. */
			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			/* The list may have changed while unlocked; rescan. */
			goto start;
		}
	}

	device_links_write_unlock();
}
/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 *
 * Called when @dev is going away; any link still in the "active" state (as a
 * supplier) or outside "dormant"/"none" (as a consumer) indicates a teardown
 * ordering problem, hence the WARN_ONs.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}
/* Device links support end. */

/* Optional platform hooks invoked on device add/remove; NULL when unused. */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;

/* presumably the /sys/dev tree and its char/block subtrees - confirm */
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

/* Serializes device hotplug operations; see also lock_device_hotplug_sysfs(). */
static DEFINE_MUTEX(device_hotplug_lock);

/* Take the global hotplug lock; pair with unlock_device_hotplug(). */
void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}
  761. int lock_device_hotplug_sysfs(void)
  762. {
  763. if (mutex_trylock(&device_hotplug_lock))
  764. return 0;
  765. /* Avoid busy looping (5 ms of sleep should do). */
  766. msleep(5);
  767. return restart_syscall();
  768. }
  769. #ifdef CONFIG_BLOCK
  770. static inline int device_is_not_partition(struct device *dev)
  771. {
  772. return !(dev->type == &part_type);
  773. }
  774. #else
  775. static inline int device_is_not_partition(struct device *dev)
  776. {
  777. return 1;
  778. }
  779. #endif
  780. /**
  781. * dev_driver_string - Return a device's driver name, if at all possible
  782. * @dev: struct device to get the name of
  783. *
  784. * Will return the device's driver's name if it is bound to a device. If
  785. * the device is not bound to a driver, it will return the name of the bus
  786. * it is attached to. If it is not attached to a bus either, an empty
  787. * string will be returned.
  788. */
  789. const char *dev_driver_string(const struct device *dev)
  790. {
  791. struct device_driver *drv;
  792. /* dev->driver can change to NULL underneath us because of unbinding,
  793. * so be careful about accessing it. dev->bus and dev->class should
  794. * never change once they are set, so they don't need special care.
  795. */
  796. drv = READ_ONCE(dev->driver);
  797. return drv ? drv->name :
  798. (dev->bus ? dev->bus->name :
  799. (dev->class ? dev->class->name : ""));
  800. }
  801. EXPORT_SYMBOL(dev_driver_string);
  802. #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
  803. static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
  804. char *buf)
  805. {
  806. struct device_attribute *dev_attr = to_dev_attr(attr);
  807. struct device *dev = kobj_to_dev(kobj);
  808. ssize_t ret = -EIO;
  809. if (dev_attr->show)
  810. ret = dev_attr->show(dev, dev_attr, buf);
  811. if (ret >= (ssize_t)PAGE_SIZE) {
  812. printk("dev_attr_show: %pS returned bad count\n",
  813. dev_attr->show);
  814. }
  815. return ret;
  816. }
  817. static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
  818. const char *buf, size_t count)
  819. {
  820. struct device_attribute *dev_attr = to_dev_attr(attr);
  821. struct device *dev = kobj_to_dev(kobj);
  822. ssize_t ret = -EIO;
  823. if (dev_attr->store)
  824. ret = dev_attr->store(dev, dev_attr, buf, count);
  825. return ret;
  826. }
/* sysfs ops shared by every device kobject (see device_ktype below). */
static const struct sysfs_ops dev_sysfs_ops = {
	.show	= dev_attr_show,
	.store	= dev_attr_store,
};
  831. #define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
  832. ssize_t device_store_ulong(struct device *dev,
  833. struct device_attribute *attr,
  834. const char *buf, size_t size)
  835. {
  836. struct dev_ext_attribute *ea = to_ext_attr(attr);
  837. char *end;
  838. unsigned long new = simple_strtoul(buf, &end, 0);
  839. if (end == buf)
  840. return -EINVAL;
  841. *(unsigned long *)(ea->var) = new;
  842. /* Always return full write size even if we didn't consume all */
  843. return size;
  844. }
  845. EXPORT_SYMBOL_GPL(device_store_ulong);
/* sysfs show callback: print the unsigned long at ea->var in hex (no 0x). */
ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);
  854. ssize_t device_store_int(struct device *dev,
  855. struct device_attribute *attr,
  856. const char *buf, size_t size)
  857. {
  858. struct dev_ext_attribute *ea = to_ext_attr(attr);
  859. char *end;
  860. long new = simple_strtol(buf, &end, 0);
  861. if (end == buf || new > INT_MAX || new < INT_MIN)
  862. return -EINVAL;
  863. *(int *)(ea->var) = new;
  864. /* Always return full write size even if we didn't consume all */
  865. return size;
  866. }
  867. EXPORT_SYMBOL_GPL(device_store_int);
/* sysfs show callback: print the int at ea->var in decimal. */
ssize_t device_show_int(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);
/* sysfs store callback: parse a boolean ("0/1/y/n/...") into ea->var. */
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (strtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);
/* sysfs show callback: print the bool at ea->var as 0 or 1. */
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * This is called once the reference count for the object
 * reaches 0.  We forward the call to the device's release
 * method, which should handle actually freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without driver attached
	 * and managed resources may have been acquired.  Make sure
	 * all resources are released.
	 *
	 * Drivers still can add resources into device after device
	 * is deleted but alive, so release devres here to avoid
	 * possible memory leak.
	 */
	devres_release_all(dev);

	/* First matching release hook wins: device, then type, then class. */
	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() "
			"function, it is broken and must be fixed.\n",
			dev_name(dev));
	/* dev itself may be gone now; only the private data is ours to free. */
	kfree(p);
}
  926. static const void *device_namespace(struct kobject *kobj)
  927. {
  928. struct device *dev = kobj_to_dev(kobj);
  929. const void *ns = NULL;
  930. if (dev->class && dev->class->ns_type)
  931. ns = dev->class->namespace(dev);
  932. return ns;
  933. }
  934. static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
  935. {
  936. struct device *dev = kobj_to_dev(kobj);
  937. if (dev->class && dev->class->get_ownership)
  938. dev->class->get_ownership(dev, uid, gid);
  939. }
/* kobj_type backing every struct device's embedded kobject. */
static struct kobj_type device_ktype = {
	.release	= device_release,
	.sysfs_ops	= &dev_sysfs_ops,
	.namespace	= device_namespace,
	.get_ownership	= device_get_ownership,
};
  946. static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
  947. {
  948. struct kobj_type *ktype = get_ktype(kobj);
  949. if (ktype == &device_ktype) {
  950. struct device *dev = kobj_to_dev(kobj);
  951. if (dev->bus)
  952. return 1;
  953. if (dev->class)
  954. return 1;
  955. }
  956. return 0;
  957. }
  958. static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
  959. {
  960. struct device *dev = kobj_to_dev(kobj);
  961. if (dev->bus)
  962. return dev->bus->name;
  963. if (dev->class)
  964. return dev->class->name;
  965. return NULL;
  966. }
/*
 * Populate @env for a device uevent: device-node data (MAJOR/MINOR/DEVNAME/
 * DEVMODE/DEVUID/DEVGID), DEVTYPE, DRIVER and DT properties, then give the
 * bus, class and device type a chance to append their own variables.
 * Returns the last non-zero callback status, or 0.
 */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			/* Only emit ownership/mode keys that differ from root/default. */
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}
/* uevent ops for the devices kset, wired up via the helpers above. */
static const struct kset_uevent_ops device_uevent_ops = {
	.filter =	dev_uevent_filter,
	.name =		dev_uevent_name,
	.uevent =	dev_uevent,
};
/*
 * "uevent" attribute read: synthesize the uevent environment this device
 * would emit and print one KEY=value per line.  Any lookup/filter/uevent
 * failure falls through to "out" and yields an empty file (count == 0)
 * rather than an error; only the env allocation failure returns -ENOMEM.
 */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	size_t count = 0;
	int retval;

	/* search the kset, the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		count += sprintf(&buf[count], "%s\n", env->envp[i]);
out:
	/* kfree(NULL) is a no-op on the early-exit paths. */
	kfree(env);
	return count;
}
  1065. static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
  1066. const char *buf, size_t count)
  1067. {
  1068. int rc;
  1069. rc = kobject_synth_uevent(&dev->kobj, buf, count);
  1070. if (rc) {
  1071. dev_err(dev, "uevent: failed to send synthetic uevent\n");
  1072. return rc;
  1073. }
  1074. return count;
  1075. }
  1076. static DEVICE_ATTR_RW(uevent);
/* "online" attribute read: 1 unless the device has been taken offline. */
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	/* device_lock() keeps ->offline stable while we sample it. */
	device_lock(dev);
	val = !dev->offline;
	device_unlock(dev);
	return sprintf(buf, "%u\n", val);
}
/*
 * "online" attribute write: bring the device online (non-zero) or take it
 * offline (zero).  The hotplug lock is taken via the sysfs-friendly variant
 * so a contended writer restarts instead of blocking uninterruptibly.
 */
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);
/* Create the NULL-terminated list of attribute @groups under @dev's kobject. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

/* Remove attribute @groups previously added with device_add_groups(). */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);
/*
 * devres payload for managed attribute groups: a single group or a
 * NULL-terminated list, depending on which devm_* helper registered it.
 */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

/* devres_release() match callback: compare by the registered group pointer. */
static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}

/* devres destructor: remove a single managed attribute group. */
static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

/* devres destructor: remove a managed NULL-terminated list of groups. */
static void devm_attr_groups_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}
  1135. /**
  1136. * devm_device_add_group - given a device, create a managed attribute group
  1137. * @dev: The device to create the group for
  1138. * @grp: The attribute group to create
  1139. *
  1140. * This function creates a group for the first time. It will explicitly
  1141. * warn and error if any of the attribute files being created already exist.
  1142. *
  1143. * Returns 0 on success or error code on failure.
  1144. */
  1145. int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
  1146. {
  1147. union device_attr_group_devres *devres;
  1148. int error;
  1149. devres = devres_alloc(devm_attr_group_remove,
  1150. sizeof(*devres), GFP_KERNEL);
  1151. if (!devres)
  1152. return -ENOMEM;
  1153. error = sysfs_create_group(&dev->kobj, grp);
  1154. if (error) {
  1155. devres_free(devres);
  1156. return error;
  1157. }
  1158. devres->group = grp;
  1159. devres_add(dev, devres);
  1160. return 0;
  1161. }
  1162. EXPORT_SYMBOL_GPL(devm_device_add_group);
/**
 * devm_device_remove_group: remove a managed group from a device
 * @dev: device to remove the group from
 * @grp: group to remove
 *
 * This function removes a group of attributes from a device.  The attributes
 * previously have to have been created for this group, otherwise it will fail.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	/* WARN if no matching managed group was ever registered for @grp. */
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);
  1179. /**
  1180. * devm_device_add_groups - create a bunch of managed attribute groups
  1181. * @dev: The device to create the group for
  1182. * @groups: The attribute groups to create, NULL terminated
  1183. *
  1184. * This function creates a bunch of managed attribute groups. If an error
  1185. * occurs when creating a group, all previously created groups will be
  1186. * removed, unwinding everything back to the original state when this
  1187. * function was called. It will explicitly warn and error if any of the
  1188. * attribute files being created already exist.
  1189. *
  1190. * Returns 0 on success or error code from sysfs_create_group on failure.
  1191. */
  1192. int devm_device_add_groups(struct device *dev,
  1193. const struct attribute_group **groups)
  1194. {
  1195. union device_attr_group_devres *devres;
  1196. int error;
  1197. devres = devres_alloc(devm_attr_groups_remove,
  1198. sizeof(*devres), GFP_KERNEL);
  1199. if (!devres)
  1200. return -ENOMEM;
  1201. error = sysfs_create_groups(&dev->kobj, groups);
  1202. if (error) {
  1203. devres_free(devres);
  1204. return error;
  1205. }
  1206. devres->groups = groups;
  1207. devres_add(dev, devres);
  1208. return 0;
  1209. }
  1210. EXPORT_SYMBOL_GPL(devm_device_add_groups);
/**
 * devm_device_remove_groups - remove a list of managed groups
 *
 * @dev: The device for the groups to be removed from
 * @groups: NULL terminated list of groups to be removed
 *
 * If groups is not NULL, remove the specified groups from the device.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	/* WARN if no matching managed group list was ever registered. */
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);
/*
 * Create all default sysfs attributes for @dev: class groups, then device
 * type groups, then the device's own groups, then the optional "online"
 * file.  On failure, everything created so far is unwound in reverse order.
 * device_remove_attrs() is the matching teardown.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	return 0;

 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}
/* Tear down everything device_add_attrs() created, in reverse order. */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}
/* "dev" attribute read: the device's MAJOR:MINOR number. */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);
/* /sys/devices/ */
struct kset *devices_kset;

/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	/* Nothing to reorder before the kset exists during early boot. */
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	/* Nothing to reorder before the kset exists during early boot. */
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}
/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	/* Nothing to reorder before the kset exists during early boot. */
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}
  1323. /**
  1324. * device_create_file - create sysfs attribute file for device.
  1325. * @dev: device.
  1326. * @attr: device attribute descriptor.
  1327. */
  1328. int device_create_file(struct device *dev,
  1329. const struct device_attribute *attr)
  1330. {
  1331. int error = 0;
  1332. if (dev) {
  1333. WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
  1334. "Attribute %s: write permission without 'store'\n",
  1335. attr->attr.name);
  1336. WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
  1337. "Attribute %s: read permission without 'show'\n",
  1338. attr->attr.name);
  1339. error = sysfs_create_file(&dev->kobj, &attr->attr);
  1340. }
  1341. return error;
  1342. }
  1343. EXPORT_SYMBOL_GPL(device_create_file);
  1344. /**
  1345. * device_remove_file - remove sysfs attribute file.
  1346. * @dev: device.
  1347. * @attr: device attribute descriptor.
  1348. */
  1349. void device_remove_file(struct device *dev,
  1350. const struct device_attribute *attr)
  1351. {
  1352. if (dev)
  1353. sysfs_remove_file(&dev->kobj, &attr->attr);
  1354. }
  1355. EXPORT_SYMBOL_GPL(device_remove_file);
  1356. /**
  1357. * device_remove_file_self - remove sysfs attribute file from its own method.
  1358. * @dev: device.
  1359. * @attr: device attribute descriptor.
  1360. *
  1361. * See kernfs_remove_self() for details.
  1362. */
  1363. bool device_remove_file_self(struct device *dev,
  1364. const struct device_attribute *attr)
  1365. {
  1366. if (dev)
  1367. return sysfs_remove_file_self(&dev->kobj, &attr->attr);
  1368. else
  1369. return false;
  1370. }
  1371. EXPORT_SYMBOL_GPL(device_remove_file_self);
  1372. /**
  1373. * device_create_bin_file - create sysfs binary attribute file for device.
  1374. * @dev: device.
  1375. * @attr: device binary attribute descriptor.
  1376. */
  1377. int device_create_bin_file(struct device *dev,
  1378. const struct bin_attribute *attr)
  1379. {
  1380. int error = -EINVAL;
  1381. if (dev)
  1382. error = sysfs_create_bin_file(&dev->kobj, attr);
  1383. return error;
  1384. }
  1385. EXPORT_SYMBOL_GPL(device_create_bin_file);
  1386. /**
  1387. * device_remove_bin_file - remove sysfs binary attribute file
  1388. * @dev: device.
  1389. * @attr: device binary attribute descriptor.
  1390. */
  1391. void device_remove_bin_file(struct device *dev,
  1392. const struct bin_attribute *attr)
  1393. {
  1394. if (dev)
  1395. sysfs_remove_bin_file(&dev->kobj, attr);
  1396. }
  1397. EXPORT_SYMBOL_GPL(device_remove_bin_file);
  1398. static void klist_children_get(struct klist_node *n)
  1399. {
  1400. struct device_private *p = to_device_private_parent(n);
  1401. struct device *dev = p->device;
  1402. get_device(dev);
  1403. }
  1404. static void klist_children_put(struct klist_node *n)
  1405. {
  1406. struct device_private *p = to_device_private_parent(n);
  1407. struct device *dev = p->device;
  1408. put_device(dev);
  1409. }
/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.
 * It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one
 * may use @dev's fields. In particular, get_device()/put_device()
 * may be used for reference counting of @dev after calling this
 * function.
 *
 * All fields in @dev must be initialized by the caller to 0, except
 * for those explicitly set to some other value. The simplest
 * approach is to use kzalloc() to allocate the structure containing
 * @dev.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
void device_initialize(struct device *dev)
{
	/* Assign the global kset before kobject_init() so a later
	 * kobject_add() files the device correctly — do not reorder. */
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
	/* Device mutexes are taken in device-tree order; lockdep cannot
	 * model that, so validation is disabled for this class. */
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	/* NUMA node unknown at this point; device_add() may inherit the
	 * parent's node later. */
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	/* Device-links bookkeeping: no driver bound yet. */
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
  1449. struct kobject *virtual_device_parent(struct device *dev)
  1450. {
  1451. static struct kobject *virtual_dir = NULL;
  1452. if (!virtual_dir)
  1453. virtual_dir = kobject_create_and_add("virtual",
  1454. &devices_kset->kobj);
  1455. return virtual_dir;
  1456. }
/*
 * A "glue" directory kobject placed between a parent device and its
 * class-devices to avoid sysfs namespace collisions; see
 * get_device_parent() below.
 */
struct class_dir {
	struct kobject kobj;
	struct class *class;	/* class this glue dir belongs to */
};

/* Convert an embedded kobject back to its containing class_dir. */
#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
  1462. static void class_dir_release(struct kobject *kobj)
  1463. {
  1464. struct class_dir *dir = to_class_dir(kobj);
  1465. kfree(dir);
  1466. }
  1467. static const
  1468. struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
  1469. {
  1470. struct class_dir *dir = to_class_dir(kobj);
  1471. return dir->class->ns_type;
  1472. }
/* kobject type for class "glue" directories. */
static struct kobj_type class_dir_ktype = {
	.release	= class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};
  1478. static struct kobject *
  1479. class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
  1480. {
  1481. struct class_dir *dir;
  1482. int retval;
  1483. dir = kzalloc(sizeof(*dir), GFP_KERNEL);
  1484. if (!dir)
  1485. return ERR_PTR(-ENOMEM);
  1486. dir->class = class;
  1487. kobject_init(&dir->kobj, &class_dir_ktype);
  1488. dir->kobj.kset = &class->p->glue_dirs;
  1489. retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
  1490. if (retval < 0) {
  1491. kobject_put(&dir->kobj);
  1492. return ERR_PTR(retval);
  1493. }
  1494. return &dir->kobj;
  1495. }
/* Serializes glue-dir lookup/creation (get_device_parent()) against
 * glue-dir teardown (cleanup_glue_dir()). */
static DEFINE_MUTEX(gdp_mutex);
/*
 * get_device_parent - choose the sysfs parent kobject for @dev.
 *
 * Returns NULL (no parent), an existing kobject, or a glue directory;
 * the glue-dir path can also return an ERR_PTR propagated from
 * class_dir_create_and_add(), so callers must check with IS_ERR().
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".
		 * Class-devices with a non class-device as parent, live
		 * in a "glue" directory to prevent namespace collisions.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		/* gdp_mutex keeps lookup and creation of the glue dir
		 * atomic with respect to concurrent adds/removals. */
		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}
  1549. static inline bool live_in_glue_dir(struct kobject *kobj,
  1550. struct device *dev)
  1551. {
  1552. if (!kobj || !dev->class ||
  1553. kobj->kset != &dev->class->p->glue_dirs)
  1554. return false;
  1555. return true;
  1556. }
/* Return the device's sysfs parent, which may be a glue directory;
 * pair with live_in_glue_dir()/cleanup_glue_dir(). */
static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}
/*
 * make sure cleaning up dir as the last step, we need to make
 * sure .release handler of kobject is run with holding the
 * global lock
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/**
	 * There is a race condition between removing glue directory
	 * and adding a new device under the glue directory.
	 *
	 * CPU1:                                         CPU2:
	 *
	 * device_add()
	 *   get_device_parent()
	 *     class_dir_create_and_add()
	 *       kobject_add_internal()
	 *         create_dir()    // create glue_dir
	 *
	 *                                               device_add()
	 *                                                 get_device_parent()
	 *                                                   kobject_get() // get glue_dir
	 *
	 * device_del()
	 *   cleanup_glue_dir()
	 *     kobject_del(glue_dir)
	 *
	 *                                               kobject_add()
	 *                                                 kobject_add_internal()
	 *                                                   create_dir() // in glue_dir
	 *                                                     sysfs_create_dir_ns()
	 *                                                       kernfs_create_dir_ns(sd)
	 *
	 *       sysfs_remove_dir() // glue_dir->sd=NULL
	 *       sysfs_put()        // free glue_dir->sd
	 *
	 *                                                         // sd is freed
	 *                                                       kernfs_new_node(sd)
	 *                                                         kernfs_get(glue_dir)
	 *                                                         kernfs_add_one()
	 *                                                         kernfs_put()
	 *
	 * Before CPU1 remove last child device under glue dir, if CPU2 add
	 * a new device under glue dir, the glue_dir kobject reference count
	 * will be increase to 2 in kobject_get(k). And CPU2 has been called
	 * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
	 * and sysfs_put(). This result in glue_dir->sd is freed.
	 *
	 * Then the CPU2 will see a stale "empty" but still potentially used
	 * glue dir around in kernfs_new_node().
	 *
	 * In order to avoid this happening, we also should make sure that
	 * kernfs_node for glue_dir is released in CPU1 only when refcount
	 * for glue_dir kobj is 1.
	 */
	/* Only tear down the sysfs directory when it has no children and
	 * we hold the sole remaining reference (see race diagram above);
	 * the kobject_put() below then drops our reference either way. */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}
/*
 * Create the standard class-related symlinks for @dev:
 *   - "of_node"   -> the device's OF node (best effort, failure only warned)
 *   - "subsystem" -> the class's subsys directory
 *   - "device"    -> the parent device (non-partition devices only)
 *   - a link from the class directory back to the device
 *
 * On error the links created so far are removed via the goto chain,
 * which must stay in reverse creation order.
 */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories and does not need symlinks */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}
/*
 * Undo device_add_class_symlinks(): remove "of_node", "device" and
 * "subsystem" links plus the class-directory link to the device.
 * Block devices under sysfs_deprecated have no class-dir link to
 * delete, mirroring the creation path.
 */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}
  1684. /**
  1685. * dev_set_name - set a device name
  1686. * @dev: device
  1687. * @fmt: format string for the device's name
  1688. */
  1689. int dev_set_name(struct device *dev, const char *fmt, ...)
  1690. {
  1691. va_list vargs;
  1692. int err;
  1693. va_start(vargs, fmt);
  1694. err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
  1695. va_end(vargs);
  1696. return err;
  1697. }
  1698. EXPORT_SYMBOL_GPL(dev_set_name);
  1699. /**
  1700. * device_to_dev_kobj - select a /sys/dev/ directory for the device
  1701. * @dev: device
  1702. *
  1703. * By default we select char/ for new entries. Setting class->dev_obj
  1704. * to NULL prevents an entry from being created. class->dev_kobj must
  1705. * be set (or cleared) before any devices are registered to the class
  1706. * otherwise device_create_sys_dev_entry() and
  1707. * device_remove_sys_dev_entry() will disagree about the presence of
  1708. * the link.
  1709. */
  1710. static struct kobject *device_to_dev_kobj(struct device *dev)
  1711. {
  1712. struct kobject *kobj;
  1713. if (dev->class)
  1714. kobj = dev->class->dev_kobj;
  1715. else
  1716. kobj = sysfs_dev_char_kobj;
  1717. return kobj;
  1718. }
  1719. static int device_create_sys_dev_entry(struct device *dev)
  1720. {
  1721. struct kobject *kobj = device_to_dev_kobj(dev);
  1722. int error = 0;
  1723. char devt_str[15];
  1724. if (kobj) {
  1725. format_dev_t(devt_str, dev->devt);
  1726. error = sysfs_create_link(kobj, &dev->kobj, devt_str);
  1727. }
  1728. return error;
  1729. }
  1730. static void device_remove_sys_dev_entry(struct device *dev)
  1731. {
  1732. struct kobject *kobj = device_to_dev_kobj(dev);
  1733. char devt_str[15];
  1734. if (kobj) {
  1735. format_dev_t(devt_str, dev->devt);
  1736. sysfs_remove_link(kobj, devt_str);
  1737. }
  1738. }
/*
 * Allocate and initialize the driver-core private part of @dev
 * (dev->p): child klist with get/put hooks and the deferred-probe
 * list head.  Returns 0 or -ENOMEM.
 */
static int device_private_init(struct device *dev)
{
	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
	if (!dev->p)
		return -ENOMEM;
	dev->p->device = dev;
	/* children hold a reference on their device while on the klist */
	klist_init(&dev->p->klist_children, klist_children_get,
		   klist_children_put);
	INIT_LIST_HEAD(&dev->p->deferred_probe);
	return 0;
}
  1750. /**
  1751. * device_add - add device to device hierarchy.
  1752. * @dev: device.
  1753. *
  1754. * This is part 2 of device_register(), though may be called
  1755. * separately _iff_ device_initialize() has been called separately.
  1756. *
  1757. * This adds @dev to the kobject hierarchy via kobject_add(), adds it
  1758. * to the global and sibling lists for the device, then
  1759. * adds it to the other relevant subsystems of the driver model.
  1760. *
  1761. * Do not call this routine or device_register() more than once for
  1762. * any device structure. The driver model core is not designed to work
  1763. * with devices that get unregistered and then spring back to life.
  1764. * (Among other things, it's very hard to guarantee that all references
  1765. * to the previous incarnation of @dev have been dropped.) Allocate
  1766. * and register a fresh new struct device instead.
  1767. *
  1768. * NOTE: _Never_ directly free @dev after calling this function, even
  1769. * if it returned an error! Always use put_device() to give up your
  1770. * reference instead.
  1771. */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	/* Hold a reference for the duration of this function; it is
	 * dropped at the "done" label on every path. */
	dev = get_device(dev);
	if (!dev)
		goto done;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * for statically allocated devices, which should all be converted
	 * some day, we need to initialize the name. We prevent reading back
	 * the name, and force the use of dev_name()
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* subsystems can specify simple device enumeration */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	/* a device cannot be registered without a name */
	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	parent = get_device(dev->parent);
	/* may return NULL, a parent/glue kobject, or an ERR_PTR */
	kobj = get_device_parent(dev, parent);
	if (IS_ERR(kobj)) {
		error = PTR_ERR(kobj);
		goto parent_error;
	}
	if (kobj)
		dev->kobj.parent = kobj;

	/* use parent numa_node */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/* first, register with generic layer. */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* notify platform of device entry */
	if (platform_notify)
		platform_notify(dev);

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	/* devices with a dev_t get a "dev" attribute, a /sys/dev entry
	 * and a devtmpfs node */
	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/* Notify clients of device addition. This call must come
	 * after dpm_sysfs_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);
	bus_probe_device(dev);
	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
	/* Error unwinding: labels run in reverse order of the setup
	 * steps above, then fall through to "done". */
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
EXPORT_SYMBOL_GPL(device_add);
/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system. The two steps can be called
 * separately, but this is the easiest and most common.
 * I.e. you should only call the two helpers separately if you
 * have a clearly defined need to use and refcount the device
 * before it is added to the hierarchy.
 *
 * For more information, see the kerneldoc for device_initialize()
 * and device_add().
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);
  1928. /**
  1929. * get_device - increment reference count for device.
  1930. * @dev: device.
  1931. *
  1932. * This simply forwards the call to kobject_get(), though
  1933. * we do take care to provide for the case that we get a NULL
  1934. * pointer passed in.
  1935. */
  1936. struct device *get_device(struct device *dev)
  1937. {
  1938. return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
  1939. }
  1940. EXPORT_SYMBOL_GPL(get_device);
  1941. /**
  1942. * put_device - decrement reference count.
  1943. * @dev: device in question.
  1944. */
  1945. void put_device(struct device *dev)
  1946. {
  1947. /* might_sleep(); */
  1948. if (dev)
  1949. kobject_put(&dev->kobj);
  1950. }
  1951. EXPORT_SYMBOL_GPL(put_device);
/**
 * kill_device - mark a device as dead so it cannot be probed.
 * @dev: device to mark; its lock must be held by the caller.
 *
 * Returns %true if this call set the "dead" flag, %false if it was
 * already set.
 */
bool kill_device(struct device *dev)
{
	/*
	 * Require the device lock and set the "dead" flag to guarantee that
	 * the update behavior is consistent with the other bitfields near
	 * it and that we cannot have an asynchronous probe routine trying
	 * to run while we are tearing out the bus/class/sysfs from
	 * underneath the device.
	 */
	lockdep_assert_held(&dev->mutex);

	if (dev->p->dead)
		return false;
	dev->p->dead = true;
	return true;
}
EXPORT_SYMBOL_GPL(kill_device);
  1968. /**
  1969. * device_del - delete device from system.
  1970. * @dev: device.
  1971. *
  1972. * This is the first part of the device unregistration
  1973. * sequence. This removes the device from the lists we control
  1974. * from here, has it removed from the other driver model
  1975. * subsystems it was added to in device_add(), and removes it
  1976. * from the kobject hierarchy.
  1977. *
  1978. * NOTE: this should be called manually _iff_ device_add() was
  1979. * also called manually.
  1980. */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;

	/* Mark the device dead under its lock so no async probe can
	 * start while we tear things down (see kill_device()). */
	device_lock(dev);
	kill_device(dev);
	device_unlock(dev);

	/* Notify clients of device removal. This call must come
	 * before dpm_sysfs_remove().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	/* undo the dev_t artifacts created in device_add() */
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_remove_properties(dev);
	device_links_purge(dev);

	/* Notify the platform of the removal, in case they
	 * need to do anything...
	 */
	if (platform_notify_remove)
		platform_notify_remove(dev);
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	/* grab the glue dir before kobject_del() clears our parent,
	 * then clean it up last (see cleanup_glue_dir()) */
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	cleanup_glue_dir(dev, glue_dir);
	put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * We do this in two parts, like we do device_register(). First,
 * we remove it from all the subsystems with device_del(), then
 * we decrement the reference count via put_device(). If that
 * is the final reference count, the device will be cleaned up
 * via device_release() above. Otherwise, the structure will
 * stick around until the final reference to the device is dropped.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);
  2055. static struct device *prev_device(struct klist_iter *i)
  2056. {
  2057. struct klist_node *n = klist_prev(i);
  2058. struct device *dev = NULL;
  2059. struct device_private *p;
  2060. if (n) {
  2061. p = to_device_private_parent(n);
  2062. dev = p->device;
  2063. }
  2064. return dev;
  2065. }
  2066. static struct device *next_device(struct klist_iter *i)
  2067. {
  2068. struct klist_node *n = klist_next(i);
  2069. struct device *dev = NULL;
  2070. struct device_private *p;
  2071. if (n) {
  2072. p = to_device_private_parent(n);
  2073. dev = p->device;
  2074. }
  2075. return dev;
  2076. }
/**
 * device_get_devnode - path of device node file
 * @dev: device
 * @mode: returned file access mode
 * @uid: returned file owner
 * @gid: returned file group
 * @tmp: possibly allocated string
 *
 * Return the relative path of a possible device node.
 * Non-default names may need to allocate a memory to compose
 * a name. This memory is returned in tmp and needs to be
 * freed by the caller.
 *
 * Lookup order: device-type devnode(), then class devnode(), then the
 * device name itself (with '!' translated to '/').  Returns NULL only
 * when the kstrdup() below fails.
 */
const char *device_get_devnode(struct device *dev,
			       umode_t *mode, kuid_t *uid, kgid_t *gid,
			       const char **tmp)
{
	char *s;

	*tmp = NULL;

	/* the device type may provide a specific name */
	if (dev->type && dev->type->devnode)
		*tmp = dev->type->devnode(dev, mode, uid, gid);
	if (*tmp)
		return *tmp;

	/* the class may provide a specific name */
	if (dev->class && dev->class->devnode)
		*tmp = dev->class->devnode(dev, mode);
	if (*tmp)
		return *tmp;

	/* return name without allocation, tmp == NULL */
	if (strchr(dev_name(dev), '!') == NULL)
		return dev_name(dev);

	/* replace '!' in the name with '/' */
	s = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!s)
		return NULL;
	strreplace(s, '!', '/');
	return *tmp = s;
}
  2116. /**
  2117. * device_for_each_child - device child iterator.
  2118. * @parent: parent struct device.
  2119. * @fn: function to be called for each device.
  2120. * @data: data for the callback.
  2121. *
  2122. * Iterate over @parent's child devices, and call @fn for each,
  2123. * passing it @data.
  2124. *
  2125. * We check the return of @fn each time. If it returns anything
  2126. * other than 0, we break out and return that value.
  2127. */
  2128. int device_for_each_child(struct device *parent, void *data,
  2129. int (*fn)(struct device *dev, void *data))
  2130. {
  2131. struct klist_iter i;
  2132. struct device *child;
  2133. int error = 0;
  2134. if (!parent->p)
  2135. return 0;
  2136. klist_iter_init(&parent->p->klist_children, &i);
  2137. while (!error && (child = next_device(&i)))
  2138. error = fn(child, data);
  2139. klist_iter_exit(&i);
  2140. return error;
  2141. }
  2142. EXPORT_SYMBOL_GPL(device_for_each_child);
  2143. /**
  2144. * device_for_each_child_reverse - device child iterator in reversed order.
  2145. * @parent: parent struct device.
  2146. * @fn: function to be called for each device.
  2147. * @data: data for the callback.
  2148. *
  2149. * Iterate over @parent's child devices, and call @fn for each,
  2150. * passing it @data.
  2151. *
  2152. * We check the return of @fn each time. If it returns anything
  2153. * other than 0, we break out and return that value.
  2154. */
  2155. int device_for_each_child_reverse(struct device *parent, void *data,
  2156. int (*fn)(struct device *dev, void *data))
  2157. {
  2158. struct klist_iter i;
  2159. struct device *child;
  2160. int error = 0;
  2161. if (!parent->p)
  2162. return 0;
  2163. klist_iter_init(&parent->p->klist_children, &i);
  2164. while ((child = prev_device(&i)) && !error)
  2165. error = fn(child, data);
  2166. klist_iter_exit(&i);
  2167. return error;
  2168. }
  2169. EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
  2170. /**
  2171. * device_find_child - device iterator for locating a particular device.
  2172. * @parent: parent struct device
  2173. * @match: Callback function to check device
  2174. * @data: Data to pass to match function
  2175. *
  2176. * This is similar to the device_for_each_child() function above, but it
  2177. * returns a reference to a device that is 'found' for later use, as
  2178. * determined by the @match callback.
  2179. *
  2180. * The callback should return 0 if the device doesn't match and non-zero
  2181. * if it does. If the callback returns non-zero and a reference to the
  2182. * current device can be obtained, this function will return to the caller
  2183. * and not iterate over any more devices.
  2184. *
  2185. * NOTE: you will need to drop the reference with put_device() after use.
  2186. */
  2187. struct device *device_find_child(struct device *parent, void *data,
  2188. int (*match)(struct device *dev, void *data))
  2189. {
  2190. struct klist_iter i;
  2191. struct device *child;
  2192. if (!parent)
  2193. return NULL;
  2194. klist_iter_init(&parent->p->klist_children, &i);
  2195. while ((child = next_device(&i)))
  2196. if (match(child, data) && get_device(child))
  2197. break;
  2198. klist_iter_exit(&i);
  2199. return child;
  2200. }
  2201. EXPORT_SYMBOL_GPL(device_find_child);
/*
 * devices_init - create the core sysfs roots: the "devices" kset plus
 * the /sys/dev, /sys/dev/block and /sys/dev/char kobjects.
 *
 * On failure the objects created so far are released through the
 * label chain (reverse creation order) and -ENOMEM is returned.
 */
int __init devices_init(void)
{
	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
	if (!devices_kset)
		return -ENOMEM;
	dev_kobj = kobject_create_and_add("dev", NULL);
	if (!dev_kobj)
		goto dev_kobj_err;
	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
	if (!sysfs_dev_block_kobj)
		goto block_kobj_err;
	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
	if (!sysfs_dev_char_kobj)
		goto char_kobj_err;

	return 0;

 char_kobj_err:
	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
	kobject_put(dev_kobj);
 dev_kobj_err:
	kset_unregister(devices_kset);
	return -ENOMEM;
}
  2225. static int device_check_offline(struct device *dev, void *not_used)
  2226. {
  2227. int ret;
  2228. ret = device_for_each_child(dev, NULL, device_check_offline);
  2229. if (ret)
  2230. return ret;
  2231. return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
  2232. }
/**
 * device_offline - Prepare the device for hot-removal.
 * @dev: Device to be put offline.
 *
 * Execute the device bus type's .offline() callback, if present, to prepare
 * the device for a subsequent hot-removal. If that succeeds, the device must
 * not be used until either it is removed or its bus type's .online() callback
 * is executed.
 *
 * Returns 1 if the device was already offline, 0 on success (or when the
 * device does not support offlining), a negative errno otherwise.
 *
 * Call under device_hotplug_lock.
 */
int device_offline(struct device *dev)
{
	int ret;

	if (dev->offline_disabled)
		return -EPERM;

	/* refuse if any descendant cannot go offline */
	ret = device_for_each_child(dev, NULL, device_check_offline);
	if (ret)
		return ret;

	device_lock(dev);
	if (device_supports_offline(dev)) {
		if (dev->offline) {
			ret = 1;	/* already offline */
		} else {
			ret = dev->bus->offline(dev);
			if (!ret) {
				kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
				dev->offline = true;
			}
		}
	}
	device_unlock(dev);

	return ret;
}
  2267. /**
  2268. * device_online - Put the device back online after successful device_offline().
  2269. * @dev: Device to be put back online.
  2270. *
  2271. * If device_offline() has been successfully executed for @dev, but the device
  2272. * has not been removed subsequently, execute its bus type's .online() callback
  2273. * to indicate that the device can be used again.
  2274. *
  2275. * Call under device_hotplug_lock.
  2276. */
  2277. int device_online(struct device *dev)
  2278. {
  2279. int ret = 0;
  2280. device_lock(dev);
  2281. if (device_supports_offline(dev)) {
  2282. if (dev->offline) {
  2283. ret = dev->bus->online(dev);
  2284. if (!ret) {
  2285. kobject_uevent(&dev->kobj, KOBJ_ONLINE);
  2286. dev->offline = false;
  2287. }
  2288. } else {
  2289. ret = 1;
  2290. }
  2291. }
  2292. device_unlock(dev);
  2293. return ret;
  2294. }
/*
 * A root_device wraps a plain struct device so that otherwise parentless
 * devices can be grouped under /sys/devices.  @owner is the module that
 * registered it; it is used for the "module" symlink in sysfs.
 */
struct root_device {
	struct device dev;
	struct module *owner;
};

/* Convert an embedded struct device back to its containing root_device. */
static inline struct root_device *to_root_device(struct device *d)
{
	return container_of(d, struct root_device, dev);
}

/* ->release callback: frees the whole root_device allocation. */
static void root_device_release(struct device *dev)
{
	kfree(to_root_device(dev));
}
/**
 * __root_device_register - allocate and register a root device
 * @name: root device name
 * @owner: owner module of the root device, usually THIS_MODULE
 *
 * This function allocates a root device and registers it
 * using device_register(). In order to free the returned
 * device, use root_device_unregister().
 *
 * Root devices are dummy devices which allow other devices
 * to be grouped under /sys/devices. Use this function to
 * allocate a root device and then use it as the parent of
 * any device which should appear under /sys/devices/{name}
 *
 * The /sys/devices/{name} directory will also contain a
 * 'module' symlink which points to the @owner directory
 * in sysfs.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: You probably want to use root_device_register().
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
	struct root_device *root;
	int err = -ENOMEM;

	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
	if (!root)
		return ERR_PTR(err);

	err = dev_set_name(&root->dev, "%s", name);
	if (err) {
		/* ->release is not set yet, so free the allocation directly. */
		kfree(root);
		return ERR_PTR(err);
	}

	root->dev.release = root_device_release;

	err = device_register(&root->dev);
	if (err) {
		/* device_register() failure requires put_device(), not kfree(). */
		put_device(&root->dev);
		return ERR_PTR(err);
	}

#ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
	if (owner) {
		struct module_kobject *mk = &owner->mkobj;

		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
		if (err) {
			device_unregister(&root->dev);
			return ERR_PTR(err);
		}
		root->owner = owner;
	}
#endif

	return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);
  2361. /**
  2362. * root_device_unregister - unregister and free a root device
  2363. * @dev: device going away
  2364. *
  2365. * This function unregisters and cleans up a device that was created by
  2366. * root_device_register().
  2367. */
  2368. void root_device_unregister(struct device *dev)
  2369. {
  2370. struct root_device *root = to_root_device(dev);
  2371. if (root->owner)
  2372. sysfs_remove_link(&root->dev.kobj, "module");
  2373. device_unregister(dev);
  2374. }
  2375. EXPORT_SYMBOL_GPL(root_device_unregister);
/* ->release callback for devices allocated by device_create_groups_vargs(). */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}
/*
 * device_create_groups_vargs - common worker for the device_create*() family.
 *
 * Allocates a struct device, fills it in from the arguments, names it from
 * @fmt/@args and registers it with device_add().  Returns the new device on
 * success or an ERR_PTR() on failure.
 */
static __printf(6, 0) struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
			   dev_t devt, void *drvdata,
			   const struct attribute_group **groups,
			   const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	/* put_device(NULL) is a no-op, so this covers every failure point. */
	put_device(dev);
	return ERR_PTR(retval);
}
  2414. /**
  2415. * device_create_vargs - creates a device and registers it with sysfs
  2416. * @class: pointer to the struct class that this device should be registered to
  2417. * @parent: pointer to the parent struct device of this new device, if any
  2418. * @devt: the dev_t for the char device to be added
  2419. * @drvdata: the data to be added to the device for callbacks
  2420. * @fmt: string for the device's name
  2421. * @args: va_list for the device's name
  2422. *
  2423. * This function can be used by char device classes. A struct device
  2424. * will be created in sysfs, registered to the specified class.
  2425. *
  2426. * A "dev" file will be created, showing the dev_t for the device, if
  2427. * the dev_t is not 0,0.
  2428. * If a pointer to a parent struct device is passed in, the newly created
  2429. * struct device will be a child of that device in sysfs.
  2430. * The pointer to the struct device will be returned from the call.
  2431. * Any further sysfs files that might be required can be created using this
  2432. * pointer.
  2433. *
  2434. * Returns &struct device pointer on success, or ERR_PTR() on error.
  2435. *
  2436. * Note: the struct class passed to this function must have previously
  2437. * been created with a call to class_create().
  2438. */
  2439. struct device *device_create_vargs(struct class *class, struct device *parent,
  2440. dev_t devt, void *drvdata, const char *fmt,
  2441. va_list args)
  2442. {
  2443. return device_create_groups_vargs(class, parent, devt, drvdata, NULL,
  2444. fmt, args);
  2445. }
  2446. EXPORT_SYMBOL_GPL(device_create_vargs);
  2447. /**
  2448. * device_create - creates a device and registers it with sysfs
  2449. * @class: pointer to the struct class that this device should be registered to
  2450. * @parent: pointer to the parent struct device of this new device, if any
  2451. * @devt: the dev_t for the char device to be added
  2452. * @drvdata: the data to be added to the device for callbacks
  2453. * @fmt: string for the device's name
  2454. *
  2455. * This function can be used by char device classes. A struct device
  2456. * will be created in sysfs, registered to the specified class.
  2457. *
  2458. * A "dev" file will be created, showing the dev_t for the device, if
  2459. * the dev_t is not 0,0.
  2460. * If a pointer to a parent struct device is passed in, the newly created
  2461. * struct device will be a child of that device in sysfs.
  2462. * The pointer to the struct device will be returned from the call.
  2463. * Any further sysfs files that might be required can be created using this
  2464. * pointer.
  2465. *
  2466. * Returns &struct device pointer on success, or ERR_PTR() on error.
  2467. *
  2468. * Note: the struct class passed to this function must have previously
  2469. * been created with a call to class_create().
  2470. */
  2471. struct device *device_create(struct class *class, struct device *parent,
  2472. dev_t devt, void *drvdata, const char *fmt, ...)
  2473. {
  2474. va_list vargs;
  2475. struct device *dev;
  2476. va_start(vargs, fmt);
  2477. dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
  2478. va_end(vargs);
  2479. return dev;
  2480. }
  2481. EXPORT_SYMBOL_GPL(device_create);
  2482. /**
  2483. * device_create_with_groups - creates a device and registers it with sysfs
  2484. * @class: pointer to the struct class that this device should be registered to
  2485. * @parent: pointer to the parent struct device of this new device, if any
  2486. * @devt: the dev_t for the char device to be added
  2487. * @drvdata: the data to be added to the device for callbacks
  2488. * @groups: NULL-terminated list of attribute groups to be created
  2489. * @fmt: string for the device's name
  2490. *
  2491. * This function can be used by char device classes. A struct device
  2492. * will be created in sysfs, registered to the specified class.
  2493. * Additional attributes specified in the groups parameter will also
  2494. * be created automatically.
  2495. *
  2496. * A "dev" file will be created, showing the dev_t for the device, if
  2497. * the dev_t is not 0,0.
  2498. * If a pointer to a parent struct device is passed in, the newly created
  2499. * struct device will be a child of that device in sysfs.
  2500. * The pointer to the struct device will be returned from the call.
  2501. * Any further sysfs files that might be required can be created using this
  2502. * pointer.
  2503. *
  2504. * Returns &struct device pointer on success, or ERR_PTR() on error.
  2505. *
  2506. * Note: the struct class passed to this function must have previously
  2507. * been created with a call to class_create().
  2508. */
  2509. struct device *device_create_with_groups(struct class *class,
  2510. struct device *parent, dev_t devt,
  2511. void *drvdata,
  2512. const struct attribute_group **groups,
  2513. const char *fmt, ...)
  2514. {
  2515. va_list vargs;
  2516. struct device *dev;
  2517. va_start(vargs, fmt);
  2518. dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
  2519. fmt, vargs);
  2520. va_end(vargs);
  2521. return dev;
  2522. }
  2523. EXPORT_SYMBOL_GPL(device_create_with_groups);
  2524. static int __match_devt(struct device *dev, const void *data)
  2525. {
  2526. const dev_t *devt = data;
  2527. return dev->devt == *devt;
  2528. }
/**
 * device_destroy - removes a device that was created with device_create()
 * @class: pointer to the struct class that this device was registered with
 * @devt: the dev_t of the device that was previously registered
 *
 * This call unregisters and cleans up a device that was created with a
 * call to device_create().
 */
void device_destroy(struct class *class, dev_t devt)
{
	struct device *dev;

	dev = class_find_device(class, NULL, &devt, __match_devt);
	if (dev) {
		/*
		 * Drop the reference taken by class_find_device() first;
		 * device_unregister() then releases the registration
		 * reference itself.
		 */
		put_device(dev);
		device_unregister(dev);
	}
}
EXPORT_SYMBOL_GPL(device_destroy);
  2547. /**
  2548. * device_rename - renames a device
  2549. * @dev: the pointer to the struct device to be renamed
  2550. * @new_name: the new name of the device
  2551. *
  2552. * It is the responsibility of the caller to provide mutual
  2553. * exclusion between two different calls of device_rename
  2554. * on the same device to ensure that new_name is valid and
  2555. * won't conflict with other devices.
  2556. *
  2557. * Note: Don't call this function. Currently, the networking layer calls this
  2558. * function, but that will change. The following text from Kay Sievers offers
  2559. * some insight:
  2560. *
  2561. * Renaming devices is racy at many levels, symlinks and other stuff are not
  2562. * replaced atomically, and you get a "move" uevent, but it's not easy to
  2563. * connect the event to the old and new device. Device nodes are not renamed at
  2564. * all, there isn't even support for that in the kernel now.
  2565. *
  2566. * In the meantime, during renaming, your target name might be taken by another
  2567. * driver, creating conflicts. Or the old name is taken directly after you
  2568. * renamed it -- then you get events for the same DEVPATH, before you even see
  2569. * the "move" event. It's just a mess, and nothing new should ever rely on
  2570. * kernel device renaming. Besides that, it's not even implemented now for
  2571. * other things than (driver-core wise very simple) network devices.
  2572. *
  2573. * We are currently about to change network renaming in udev to completely
  2574. * disallow renaming of devices in the same namespace as the kernel uses,
  2575. * because we can't solve the problems properly, that arise with swapping names
  2576. * of multiple interfaces without races. Means, renaming of eth[0-9]* will only
  2577. * be allowed to some other name than eth[0-9]*, for the aforementioned
  2578. * reasons.
  2579. *
  2580. * Make up a "real" name in the driver before you register anything, or add
  2581. * some other attributes for userspace to find the device, or use udev to add
  2582. * symlinks -- but never rename kernel devices later, it's a complete mess. We
  2583. * don't even want to get into that and try to implement the missing pieces in
  2584. * the core. We really have other pieces to fix in the driver core mess. :)
  2585. */
  2586. int device_rename(struct device *dev, const char *new_name)
  2587. {
  2588. struct kobject *kobj = &dev->kobj;
  2589. char *old_device_name = NULL;
  2590. int error;
  2591. dev = get_device(dev);
  2592. if (!dev)
  2593. return -EINVAL;
  2594. dev_dbg(dev, "renaming to %s\n", new_name);
  2595. old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
  2596. if (!old_device_name) {
  2597. error = -ENOMEM;
  2598. goto out;
  2599. }
  2600. if (dev->class) {
  2601. error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
  2602. kobj, old_device_name,
  2603. new_name, kobject_namespace(kobj));
  2604. if (error)
  2605. goto out;
  2606. }
  2607. error = kobject_rename(kobj, new_name);
  2608. if (error)
  2609. goto out;
  2610. out:
  2611. put_device(dev);
  2612. kfree(old_device_name);
  2613. return error;
  2614. }
  2615. EXPORT_SYMBOL_GPL(device_rename);
  2616. static int device_move_class_links(struct device *dev,
  2617. struct device *old_parent,
  2618. struct device *new_parent)
  2619. {
  2620. int error = 0;
  2621. if (old_parent)
  2622. sysfs_remove_link(&dev->kobj, "device");
  2623. if (new_parent)
  2624. error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
  2625. "device");
  2626. return error;
  2627. }
/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm_list
 *
 * Returns 0 on success or a negative error code.  On failure, a best-effort
 * attempt is made to restore the device to its old parent.
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	new_parent_kobj = get_device_parent(dev, new_parent);
	if (IS_ERR(new_parent_kobj)) {
		error = PTR_ERR(new_parent_kobj);
		put_device(new_parent);
		goto out;
	}

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	/* Move the sysfs directory first ... */
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	/* ... then re-link the device into the new parent's child list. */
	old_parent = dev->parent;
	dev->parent = new_parent;
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* We ignore errors on cleanup since we're hosed anyway... */
			device_move_class_links(dev, new_parent, old_parent);
			/*
			 * NOTE(review): &old_parent->kobj is formed even when
			 * old_parent is NULL -- presumably this rollback path
			 * is never taken with a NULL old parent; confirm
			 * against callers before relying on it.
			 */
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	/* Finally, reorder the PM and devices lists as the caller requested. */
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_move);
/**
 * device_shutdown - call ->shutdown() on each device to shutdown.
 *
 * Walks the global devices list from newest to oldest, invoking the class
 * shutdown_pre hook and then the bus (or driver) shutdown callback for each
 * device, with the device and its parent locked.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	/* Let in-flight probes finish, then block any new ones. */
	wait_for_device_probe();
	device_block_probing();

	cpufreq_suspend();

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward, shutting down each in turn.
	 * Beware that device unplug events may also start pulling
	 * devices offline, even as the system is shutting down.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				kobj.entry);

		/*
		 * hold reference count of device's parent to
		 * prevent it from being freed because parent's
		 * lock is to be held
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in the
		 * event that dev->*->shutdown() doesn't remove it.
		 */
		list_del_init(&dev->kobj.entry);
		/* Drop the list lock before calling out to driver code. */
		spin_unlock(&devices_kset->list_lock);

		/* hold lock to avoid race with probe/release */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* Don't allow any more runtime suspends */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown_pre) {
			if (initcall_debug)
				dev_info(dev, "shutdown_pre\n");
			dev->class->shutdown_pre(dev);
		}
		/* Bus shutdown takes precedence over the driver's own hook. */
		if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		put_device(parent);

		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}
  2772. /*
  2773. * Device logging functions
  2774. */
  2775. #ifdef CONFIG_PRINTK
/*
 * create_syslog_header - build the structured header for dev_printk_emit().
 *
 * Writes NUL-separated "KEY=value" properties (SUBSYSTEM=, DEVICE=) into
 * @hdr and returns the number of bytes used, or 0 when no subsystem can be
 * determined or the header would overflow @hdrlen.
 */
static int
create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
{
	const char *subsys;
	size_t pos = 0;

	if (dev->class)
		subsys = dev->class->name;
	else if (dev->bus)
		subsys = dev->bus->name;
	else
		return 0;

	pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
	if (pos >= hdrlen)
		goto overflow;

	/*
	 * Add device identifier DEVICE=:
	 *   b12:8         block dev_t
	 *   c127:3        char dev_t
	 *   n8            netdev ifindex
	 *   +sound:card0  subsystem:devname
	 *
	 * The pos++ before each snprintf() keeps the previous property's
	 * terminating NUL in place, separating the properties in @hdr.
	 */
	if (MAJOR(dev->devt)) {
		char c;

		if (strcmp(subsys, "block") == 0)
			c = 'b';
		else
			c = 'c';
		pos++;
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=%c%u:%u",
				c, MAJOR(dev->devt), MINOR(dev->devt));
	} else if (strcmp(subsys, "net") == 0) {
		struct net_device *net = to_net_dev(dev);

		pos++;
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=n%u", net->ifindex);
	} else {
		pos++;
		pos += snprintf(hdr + pos, hdrlen - pos,
				"DEVICE=+%s:%s", subsys, dev_name(dev));
	}

	if (pos >= hdrlen)
		goto overflow;

	return pos;

overflow:
	dev_WARN(dev, "device/subsystem name too long");
	return 0;
}
  2824. int dev_vprintk_emit(int level, const struct device *dev,
  2825. const char *fmt, va_list args)
  2826. {
  2827. char hdr[128];
  2828. size_t hdrlen;
  2829. hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));
  2830. return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
  2831. }
  2832. EXPORT_SYMBOL(dev_vprintk_emit);
  2833. int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
  2834. {
  2835. va_list args;
  2836. int r;
  2837. va_start(args, fmt);
  2838. r = dev_vprintk_emit(level, dev, fmt, args);
  2839. va_end(args);
  2840. return r;
  2841. }
  2842. EXPORT_SYMBOL(dev_printk_emit);
  2843. static void __dev_printk(const char *level, const struct device *dev,
  2844. struct va_format *vaf)
  2845. {
  2846. if (dev)
  2847. dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
  2848. dev_driver_string(dev), dev_name(dev), vaf);
  2849. else
  2850. printk("%s(NULL device *): %pV", level, vaf);
  2851. }
  2852. void dev_printk(const char *level, const struct device *dev,
  2853. const char *fmt, ...)
  2854. {
  2855. struct va_format vaf;
  2856. va_list args;
  2857. va_start(args, fmt);
  2858. vaf.fmt = fmt;
  2859. vaf.va = &args;
  2860. __dev_printk(level, dev, &vaf);
  2861. va_end(args);
  2862. }
  2863. EXPORT_SYMBOL(dev_printk);
/*
 * define_dev_printk_level - generate one dev_<level>() helper (e.g.
 * _dev_err(), _dev_info()).  Each generated function packs its varargs
 * into a struct va_format and forwards to __dev_printk() with the given
 * KERN_<LEVEL> prefix, then exports the symbol.
 */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);
  2887. #endif
  2888. static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
  2889. {
  2890. return fwnode && !IS_ERR(fwnode->secondary);
  2891. }
/**
 * set_primary_fwnode - Change the primary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New primary firmware node of the device.
 *
 * Set the device's firmware node pointer to @fwnode, but if a secondary
 * firmware node of the device is present, preserve it.
 */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	struct device *parent = dev->parent;
	struct fwnode_handle *fn = dev->fwnode;

	if (fwnode) {
		/* If the current node is a primary one, keep its secondary. */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			/* The new primary must not bring a secondary of its own. */
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		/* Removing the primary: fall back to the secondary, if any. */
		if (fwnode_is_primary(fn)) {
			dev->fwnode = fn->secondary;
			/*
			 * Don't clear fn->secondary when fn is also the
			 * parent's firmware node -- presumably it is shared in
			 * that case; TODO(review): confirm against callers.
			 */
			if (!(parent && fn == parent->fwnode))
				fn->secondary = NULL;
		} else {
			dev->fwnode = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);
/**
 * set_secondary_fwnode - Change the secondary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New secondary firmware node of the device.
 *
 * If a primary firmware node of the device is present, set its secondary
 * pointer to @fwnode. Otherwise, set the device's firmware node pointer to
 * @fwnode.
 */
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	/* Mark @fwnode as a secondary node: it has no secondary of its own. */
	if (fwnode)
		fwnode->secondary = ERR_PTR(-ENODEV);

	if (fwnode_is_primary(dev->fwnode))
		dev->fwnode->secondary = fwnode;
	else
		dev->fwnode = fwnode;
}
/**
 * device_set_of_node_from_dev - reuse device-tree node of another device
 * @dev: device whose device-tree node is being set
 * @dev2: device whose device-tree node is being reused
 *
 * Takes another reference to the new device-tree node after first dropping
 * any reference held to the old node.
 */
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
	/* Drop the old node's reference before taking one on the new node. */
	of_node_put(dev->of_node);
	dev->of_node = of_node_get(dev2->of_node);
	/* Flag the node as shared so the driver core can tell it is reused. */
	dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);