namespace_devs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	if (nspm->id >= 0)
		ida_free(&nd_region->ns_ida, nspm->id);
	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_io(const struct device *dev);

static int is_uuid_busy(struct device *dev, void *data)
{
	uuid_t *uuid1 = data, *uuid2 = NULL;
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		uuid2 = nspm->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);
		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		uuid2 = nd_pfn->uuid;
	}
	if (uuid2 && uuid_equal(uuid1, uuid2))
		return -EBUSY;
	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_region(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}
/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 *
 * Returns: %true if the uuid is unique, %false if not
 */
bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_namespace_io *nsio;
	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;
	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;
	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;
	if (ndns->force_raw)
		return false;
	nsio = to_nd_namespace_io(dev);
	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
				IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED)
		return false;
	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;
		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (nspm->lbasize == 0 || nspm->lbasize == 512)
			/* default */;
		else if (nspm->lbasize == 4096)
			return 4096;
		else
			dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
					nspm->lbasize);
	}
	/*
	 * There is no namespace label (is_namespace_io()), or the label
	 * indicates the default sector size.
	 */
	return 512;
}
EXPORT_SYMBOL(pmem_sector_size);

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;
	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";
	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		int nsidx = 0;
		if (is_namespace_pmem(&ndns->dev)) {
			struct nd_namespace_pmem *nspm;
			nspm = to_nd_namespace_pmem(&ndns->dev);
			nsidx = nspm->id;
		}
		if (nsidx)
			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
					suffix ? suffix : "");
		else
			sprintf(name, "pmem%d%s", nd_region->id,
					suffix ? suffix : "");
	} else {
		return NULL;
	}
	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const uuid_t *nd_dev_to_uuid(struct device *dev)
{
	if (dev && is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		return nspm->uuid;
	}
	return &uuid_null;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		ns_altname = &nspm->alt_name;
	} else
		return -ENXIO;
	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;
	input = kstrndup(buf, len, GFP_KERNEL);
	if (!input)
		return -ENOMEM;
	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}
	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;
out:
	kfree(input);
	return rc;
}

static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;
	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);
		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;
		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);
	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		ns_altname = nspm->alt_name;
	} else
		return -ENXIO;
	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;
	while (n) {
		struct resource *res, *last;
		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;
		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}
		rc = adjust_resource(res, res->start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}
	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered. Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 *
 * Returns: %0 on success or -errno on error
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;
		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}
	return 0;
}

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	int rc = 0;
	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n);
	if (!res)
		rc = -EBUSY;
	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}
/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one
 * exists). If reserving PMEM any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	unsigned long align;
	align = nd_region->align / nd_region->ndr_mappings;
	valid->start = ALIGN(valid->start, align);
	valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;
	if (valid->start >= valid->end)
		goto invalid;
	if (is_reserve)
		return;
	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;
	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;
	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;
invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}
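
/*
 * scan_allocate() walks a mapping's dpa resource list looking for free
 * space before, between, or after existing allocations (see enum
 * alloc_loc below). It grows an adjacent allocation for @label_id when
 * possible, otherwise it inserts a new dpa resource.
 */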
enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;
	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;
	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;
		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;
		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}
		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}
		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}
		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}
		if (strcmp(action, "allocate") == 0) {
			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}
		if (!new_res)
			new_res = res;
		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);
		if (rc)
			return n;
		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}
	if (n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);
		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}
	return 0;
}

int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;
	if (!is_memory(dev))
		return 0;
	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;
	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;
		if (nd_mapping->nvdimm != nvdimm)
			continue;
		n = nd_pmem_available_dpa(nd_region, nd_mapping);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}
	return 0;
}

void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;
	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}
/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered. For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA. For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 *
 * Returns: %0 on success or -errno on error
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc;
		rem = scan_allocate(nd_region, nd_mapping, label_id, rem);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;
		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}
	return 0;
}
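
/*
 * The namespace's system-physical-address range is derived by scaling
 * the per-dimm dpa allocation offset by the number of mappings in the
 * interleave set and adding it to the region's base address.
 */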
static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;
	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}
	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;
		if (!ndd) {
			size = 0;
			goto out;
		}
		nd_label_gen_id(&label_id, nspm->uuid, 0);
		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}
		WARN_ON_ONCE(1);
		size = 0;
	}
out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}

static bool uuid_not_set(const uuid_t *uuid, struct device *dev,
		const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}
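
/*
 * __size_store() takes the requested namespace size in bytes, validates
 * it against the region's alignment and remaining capacity, then grows
 * or shrinks the allocation by an equal per-dimm share on each mapping
 * in the interleave set.
 */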
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	uuid_t *uuid = NULL;
	if (dev->driver || ndns->claim)
		return -EBUSY;
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		uuid = nspm->uuid;
		id = nspm->id;
	}
	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "not associated with dimm(s)\n");
		return -ENXIO;
	}
	div_u64_rem(val, nd_region->align, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %ldK aligned\n", val,
				nd_region->align / SZ_1K);
		return -EINVAL;
	}
	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);
		/*
		 * All dimms in an interleave set need to be enabled
		 * for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;
		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_allocatable_dpa(nd_region);
	if (val > available + allocated)
		return -ENOSPC;
	if (val == allocated)
		return 0;
	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
	if (rc)
		return rc;
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	}
	/*
	 * Try to delete the namespace if we deleted all of its
	 * allocation, this is not the seed or 0th device for the
	 * region, and it is not actively claimed by a btt, pfn, or dax
	 * instance.
	 */
	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);
	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	int rc;
	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	/* setting size zero == 'delete namespace' */
	if (rc == 0 && val == 0 && is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		kfree(nspm->uuid);
		nspm->uuid = NULL;
	}
	dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);
	return rc < 0 ? rc : len;
}
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;
	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);
	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
	int i;
	bool locked = false;
	struct device *dev = &ndns->dev;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
			dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
			locked = true;
		}
	}
	return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);

static uuid_t *namespace_to_uuid(struct device *dev)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		return nspm->uuid;
	}
	return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	uuid_t *uuid = namespace_to_uuid(dev);
	if (IS_ERR(uuid))
		return PTR_ERR(uuid);
	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}
/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 *
 * Returns: %0 on success or -errno on error
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, uuid_t *new_uuid,
		uuid_t **old_uuid)
{
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;
	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;
	if (*old_uuid == NULL)
		goto out;
	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace. Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}
	nd_label_gen_id(&old_label_id, *old_uuid, 0);
	nd_label_gen_id(&new_label_id, new_uuid, 0);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		struct resource *res;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);
		mutex_lock(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			struct nd_label_id label_id;
			uuid_t uuid;
			if (!nd_label)
				continue;
			nsl_get_uuid(ndd, nd_label, &uuid);
			nd_label_gen_id(&label_id, &uuid,
					nsl_get_flags(ndd, nd_label));
			if (strcmp(old_label_id.id, label_id.id) == 0)
				set_bit(ND_LABEL_REAP, &label_ent->flags);
		}
		mutex_unlock(&nd_mapping->lock);
	}
	kfree(*old_uuid);
out:
	*old_uuid = new_uuid;
	return 0;
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	uuid_t *uuid = NULL;
	uuid_t **ns_uuid;
	ssize_t rc = 0;
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		ns_uuid = &nspm->uuid;
	} else
		return -ENXIO;
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);
	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
		res = &nsio->res;
	} else
		return -ENXIO;
	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		return nd_size_select_show(nspm->lbasize,
				pmem_lbasize_supported, buf);
	}
	return -ENXIO;
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const unsigned long *supported;
	unsigned long *lbasize;
	ssize_t rc = 0;
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		lbasize = &nspm->lbasize;
		supported = pmem_lbasize_supported;
	} else
		return -ENXIO;
	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_size_select_store(dev, buf, lbasize, supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
			buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);
	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	uuid_t *uuid = NULL;
	int count = 0, i;
	u32 flags = 0;
	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		uuid = nspm->uuid;
		flags = 0;
	}
	if (!uuid)
		goto out;
	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
out:
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);
static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;
		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}
		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
	 * block is found, a v1.1 label for any mapping will set bit 1, and a
	 * v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, nsindex of NULL
	 * implies labels haven't been initialized yet, and when they will,
	 * they will be of the 1.2 format, so we can assume BTT2.0
	 *
	 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
	 * found, we enforce BTT2.0
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces)
	 */
	switch (loop_bitmask) {
	case 0:
	case 2:
		return NVDIMM_CCLASS_BTT;
	case 1:
	case 4:
		return NVDIMM_CCLASS_BTT2;
	default:
		return -ENXIO;
	}
}

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;
	device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	device_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(holder);

static int __holder_class_store(struct device *dev, const char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	if (dev->driver || ndns->claim)
		return -EBUSY;
	if (sysfs_streq(buf, "btt")) {
		int rc = btt_claim_class(dev);
		if (rc < NVDIMM_CCLASS_NONE)
			return rc;
		ndns->claim_class = rc;
	} else if (sysfs_streq(buf, "pfn"))
		ndns->claim_class = NVDIMM_CCLASS_PFN;
	else if (sysfs_streq(buf, "dax"))
		ndns->claim_class = NVDIMM_CCLASS_DAX;
	else if (sysfs_streq(buf, ""))
		ndns->claim_class = NVDIMM_CCLASS_NONE;
	else
		return -EINVAL;
	return 0;
}

static ssize_t holder_class_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int rc;
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __holder_class_store(dev, buf);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);
	return rc < 0 ? rc : len;
}

static ssize_t holder_class_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;
	device_lock(dev);
	if (ndns->claim_class == NVDIMM_CCLASS_NONE)
		rc = sprintf(buf, "\n");
	else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
			(ndns->claim_class == NVDIMM_CCLASS_BTT2))
		rc = sprintf(buf, "btt\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
		rc = sprintf(buf, "pfn\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
		rc = sprintf(buf, "dax\n");
	else
		rc = sprintf(buf, "<unknown>\n");
	device_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RW(holder_class);
static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;
	device_lock(dev);
	claim = ndns->claim;
	if (claim && is_nd_btt(claim))
		mode = "safe";
	else if (claim && is_nd_pfn(claim))
		mode = "memory";
	else if (claim && is_nd_dax(claim))
		mode = "dax";
	else if (!claim && pmem_should_map_pages(dev))
		mode = "memory";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	device_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = kstrtobool(buf, &force_raw);
	if (rc)
		return rc;
	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	&dev_attr_holder_class.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	if (is_namespace_pmem(dev)) {
		if (a == &dev_attr_size.attr)
			return 0644;
		return a->mode;
	}
	/* base is_namespace_io() attributes */
	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
	    a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
	    a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
	    a == &dev_attr_resource.attr)
		return a->mode;
	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
	.groups = nd_namespace_attribute_groups,
};

static const struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
	.groups = nd_namespace_attribute_groups,
};

static bool is_namespace_pmem(const struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_io(const struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}
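
/*
 * Resolve the namespace backing a btt, pfn, or dax claim device (or the
 * raw namespace itself), then reject namespaces that are active, locked,
 * too small, misaligned for page mapping, or missing a uuid.
 */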
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;
	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;
		if (!ndns)
			return ERR_PTR(-ENODEV);
		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
					dev_name(ndns->claim));
			return ERR_PTR(-ENXIO);
		}
	}
	if (nvdimm_namespace_locked(ndns))
		return ERR_PTR(-EACCES);
	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}
	/*
	 * Note, alignment validation for fsdax and devdax mode
	 * namespaces happens in nd_pfn_validate() where infoblock
	 * padding parameters can be applied.
	 */
	if (pmem_should_map_pages(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
		struct resource *res = &nsio->res;
		if (!IS_ALIGNED(res->start | (res->end + 1),
					memremap_compat_align())) {
			dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;
		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	}
	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
		resource_size_t size)
{
	return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
}
EXPORT_SYMBOL_GPL(devm_namespace_enable);

void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
{
	devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
}
EXPORT_SYMBOL_GPL(devm_namespace_disable);
static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;
	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;
	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}
	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;
	devs[0] = dev;
	return devs;
}
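
/*
 * has_uuid_at_pos() confirms that some dimm in the region carries a
 * label with the given uuid and interleave-set cookie at position @pos,
 * rejecting duplicate labels for the same uuid on one dimm.
 */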
static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_interleave_set *nd_set = nd_region->nd_set;
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		bool found_uuid = false;
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			u16 position;
			if (!nd_label)
				continue;
			position = nsl_get_position(ndd, nd_label);
			if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
				continue;
			if (!nsl_uuid_equal(ndd, nd_label, uuid))
				continue;
			if (!nsl_validate_type_guid(ndd, nd_label,
						&nd_set->type_guid))
				continue;
			if (found_uuid) {
				dev_dbg(ndd->dev, "duplicate entry for uuid\n");
				return false;
			}
			found_uuid = true;
			if (!nsl_validate_nlabel(nd_region, ndd, nd_label))
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found)
			break;
	}
	return found != NULL;
}

static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
{
	int i;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_label *nd_label = NULL;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		struct nd_label_ent *label_ent;
		lockdep_assert_held(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			nd_label = label_ent->label;
			if (!nd_label)
				continue;
			if (nsl_uuid_equal(ndd, nd_label, pmem_id))
				break;
			nd_label = NULL;
		}
		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}
		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = nsl_get_dpa(ndd, nd_label);
		pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
		if (pmem_start >= hw_start && pmem_start < hw_end
				&& pmem_end <= hw_end && pmem_end > hw_start)
			/* pass */;
		else {
			dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
					dev_name(ndd->dev),
					nsl_uuid_raw(ndd, nd_label));
			return -EINVAL;
		}
		/* move recently validated label to the front of the list */
		list_move(&label_ent->list, &nd_mapping->labels);
	}
	return 0;
}

/**
 * create_namespace_pmem - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_label: target pmem namespace label to evaluate
 *
 * Returns: the created &struct device on success or ERR_PTR(-errno) on error
 */
static struct device *create_namespace_pmem(struct nd_region *nd_region,
					    struct nd_mapping *nd_mapping,
					    struct nd_namespace_label *nd_label)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_index *nsindex =
		to_namespace_index(ndd, ndd->ns_current);
	u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
	struct nd_label_ent *label_ent;
	struct nd_namespace_pmem *nspm;
	resource_size_t size = 0;
	struct resource *res;
	struct device *dev;
	uuid_t uuid;
	int rc = 0;
	u16 i;

	if (cookie == 0) {
		dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
		return ERR_PTR(-ENXIO);
	}

	if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
			nsl_uuid_raw(ndd, nd_label));
		if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
			return ERR_PTR(-EAGAIN);

		dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
			nsl_uuid_raw(ndd, nd_label));
	}

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return ERR_PTR(-ENOMEM);

	nspm->id = -1;
	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nsl_get_uuid(ndd, nd_label, &uuid);
		if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
			continue;
		if (has_uuid_at_pos(nd_region, &uuid, altcookie, i))
			continue;
		break;
	}

	if (i < nd_region->ndr_mappings) {
		struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;

		/*
		 * Give up if we don't find an instance of a uuid at each
		 * position (from 0 to nd_region->ndr_mappings - 1), or if we
		 * find a dimm with two instances of the same uuid.
		 */
		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
			nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label));
		rc = -EINVAL;
		goto err;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1]. In the process,
	 * check that the namespace aligns with interleave-set.
	 */
	nsl_get_uuid(ndd, nd_label, &uuid);
	rc = select_pmem_id(nd_region, &uuid);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_namespace_label *label0;
		struct nvdimm_drvdata *ndd;

		nd_mapping = &nd_region->mapping[i];
		label_ent = list_first_entry_or_null(&nd_mapping->labels,
						     typeof(*label_ent), list);
		label0 = label_ent ? label_ent->label : NULL;

		if (!label0) {
			WARN_ON(1);
			continue;
		}

		ndd = to_ndd(nd_mapping);
		size += nsl_get_rawsize(ndd, label0);
		if (nsl_get_position(ndd, label0) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
					 NSLABEL_NAME_LEN, GFP_KERNEL);
		nsl_get_uuid(ndd, label0, &uuid);
		nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		nspm->lbasize = nsl_get_lbasize(ndd, label0);
		nspm->nsio.common.claim_class =
			nsl_get_claim_class(ndd, label0);
	}

	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}

	nd_namespace_pmem_set_resource(nd_region, nspm, size);

	return dev;
 err:
	namespace_pmem_release(dev);
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "invalid label(s)\n");
		break;
	default:
		dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
		break;
	}
	return ERR_PTR(rc);
}
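
/* Allocate a zero-sized pmem namespace "seed" device for the region. */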
static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
{
	struct nd_namespace_pmem *nspm;
	struct resource *res;
	struct device *dev;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return NULL;

	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	nspm->id = ida_alloc(&nd_region->ns_ida, GFP_KERNEL);
	if (nspm->id < 0) {
		kfree(nspm);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
	nd_namespace_pmem_set_resource(nd_region, nspm, 0);

	return dev;
}
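
/*
 * Seed replenishment helpers: called under the nvdimm bus lock to stage the
 * next empty namespace/pfn/dax/btt device for the region.
 */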
static struct lock_class_key nvdimm_namespace_key;

void nd_region_create_ns_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
		return;

	nd_region->ns_seed = nd_namespace_pmem_create(nd_region);

	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create namespace\n");
	else {
		device_initialize(nd_region->ns_seed);
		lockdep_set_class(&nd_region->ns_seed->mutex,
				  &nvdimm_namespace_key);
		nd_device_register(nd_region->ns_seed);
	}
}

void nd_region_create_dax_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->dax_seed = nd_dax_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->dax_seed)
		dev_err(&nd_region->dev, "failed to create dax namespace\n");
}

void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->pfn_seed)
		dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->btt_seed = nd_btt_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->btt_seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
}
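
/*
 * Return @count when @nd_label's uuid does not collide with a namespace
 * already present in @devs, or -ENXIO when it claims an extent that is
 * already accounted for.
 */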
static int add_namespace_resource(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, struct device **devs,
		int count)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int i;

	for (i = 0; i < count; i++) {
		uuid_t *uuid = namespace_to_uuid(devs[i]);

		if (IS_ERR(uuid)) {
			WARN_ON(1);
			continue;
		}

		if (!nsl_uuid_equal(ndd, nd_label, uuid))
			continue;
		dev_err(&nd_region->dev,
			"error: conflicting extents for uuid: %pUb\n", uuid);
		return -ENXIO;
	}

	return i;
}
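
/* Sort helper: order namespaces by resource start; io namespaces compare equal. */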
static int cmp_dpa(const void *a, const void *b)
{
	const struct device *dev_a = *(const struct device **) a;
	const struct device *dev_b = *(const struct device **) b;
	struct nd_namespace_pmem *nspm_a, *nspm_b;

	if (is_namespace_io(dev_a))
		return 0;

	nspm_a = to_nd_namespace_pmem(dev_a);
	nspm_b = to_nd_namespace_pmem(dev_b);

	return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
		      sizeof(resource_size_t));
}
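
/*
 * Walk the labels on the first mapping and instantiate a pmem namespace
 * device for each distinct, validated interleave-set uuid. If no labels
 * yield a namespace, publish a zero-sized seed for userspace to configure.
 * The result is NULL-terminated and sorted by starting address.
 */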
static struct device **scan_labels(struct nd_region *nd_region)
{
	int i, count = 0;
	struct device *dev, **devs;
	struct nd_label_ent *label_ent, *e;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;

	devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
	if (!devs)
		return NULL;

	/* "safe" because create_namespace_pmem() might list_move() label_ent */
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;
		struct device **__devs;

		if (!nd_label)
			continue;

		/* skip labels that describe extents outside of the region */
		if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
		    nsl_get_dpa(ndd, nd_label) > map_end)
			continue;

		i = add_namespace_resource(nd_region, nd_label, devs, count);
		if (i < 0)
			goto err;
		if (i < count)
			continue;

		if (count) {
			__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
			if (!__devs)
				goto err;
			memcpy(__devs, devs, sizeof(dev) * count);
			kfree(devs);
			devs = __devs;
		}

		dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
		if (IS_ERR(dev)) {
			switch (PTR_ERR(dev)) {
			case -EAGAIN:
				/* skip invalid labels */
				continue;
			default:
				goto err;
			}
		} else
			devs[count++] = dev;
	}

	dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count,
		count == 1 ? "" : "s");

	if (count == 0) {
		struct nd_namespace_pmem *nspm;

		/* Publish a zero-sized namespace for userspace to configure. */
		nd_mapping_free_labels(nd_mapping);

		nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
		if (!nspm)
			goto err;
		dev = &nspm->nsio.common.dev;
		dev->type = &namespace_pmem_device_type;
		nd_namespace_pmem_set_resource(nd_region, nspm, 0);
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	} else if (is_memory(&nd_region->dev)) {
		/* clean unselected labels */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct list_head *l, *e;
			LIST_HEAD(list);
			int j;

			nd_mapping = &nd_region->mapping[i];
			if (list_empty(&nd_mapping->labels)) {
				WARN_ON(1);
				continue;
			}

			j = count;
			list_for_each_safe(l, e, &nd_mapping->labels) {
				if (!j--)
					break;
				list_move_tail(l, &list);
			}
			nd_mapping_free_labels(nd_mapping);
			list_splice_init(&list, &nd_mapping->labels);
		}
	}

	if (count > 1)
		sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);

	return devs;

 err:
	for (i = 0; devs[i]; i++)
		namespace_pmem_release(devs[i]);
	kfree(devs);

	return NULL;
}
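
/* Take all mapping locks, scan labels into namespace devices, then unlock. */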
static struct device **create_namespaces(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping;
	struct device **devs;
	int i;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	/* lock down all mappings while we scan labels */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		mutex_lock_nested(&nd_mapping->lock, i);
	}

	devs = scan_labels(nd_region);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		int reverse = nd_region->ndr_mappings - 1 - i;

		nd_mapping = &nd_region->mapping[reverse];
		mutex_unlock(&nd_mapping->lock);
	}

	return devs;
}
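
/*
 * devm action: drop the label references and DIMM claims taken by
 * init_active_labels().
 */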
static void deactivate_labels(void *region)
{
	struct nd_region *nd_region = region;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = nd_mapping->ndd;
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		mutex_lock(&nd_mapping->lock);
		nd_mapping_free_labels(nd_mapping);
		mutex_unlock(&nd_mapping->lock);

		put_ndd(ndd);
		nd_mapping->ndd = NULL;
		if (ndd)
			atomic_dec(&nvdimm->busy);
	}
}
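
/*
 * Populate each mapping's label list from the DIMM's active label area and
 * pin the backing label data. Fails if a DIMM that requires label data is
 * locked or disabled.
 */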
static int init_active_labels(struct nd_region *nd_region)
{
	int i, rc = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nd_label_ent *label_ent;
		int count, j;

		/*
		 * If the dimm is disabled then we may need to prevent
		 * the region from being activated.
		 */
		if (!ndd) {
			if (test_bit(NDD_LOCKED, &nvdimm->flags))
				/* fail, label data may be unreadable */;
			else if (test_bit(NDD_LABELING, &nvdimm->flags))
				/* fail, labels needed to disambiguate dpa */;
			else
				continue;

			dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
				dev_name(&nd_mapping->nvdimm->dev),
				test_bit(NDD_LOCKED, &nvdimm->flags)
				? "locked" : "disabled");
			rc = -ENXIO;
			goto out;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "count: %d\n", count);
		if (!count)
			continue;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
			if (!label_ent)
				break;
			label = nd_label_active(ndd, j);
			label_ent->label = label;

			mutex_lock(&nd_mapping->lock);
			list_add_tail(&label_ent->list, &nd_mapping->labels);
			mutex_unlock(&nd_mapping->lock);
		}

		if (j < count)
			break;
	}

	if (i < nd_region->ndr_mappings)
		rc = -ENOMEM;

out:
	if (rc) {
		deactivate_labels(nd_region);
		return rc;
	}

	return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
					nd_region);
}
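
/*
 * Build and register the namespace devices for @nd_region according to its
 * type. Returns the number of namespaces registered or a negative errno;
 * @err reports how many namespace devices failed to register.
 */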
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
		devs = create_namespaces(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_PMEM) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(dev);
			id = ida_alloc(&nd_region->ns_ida, GFP_KERNEL);
			nspm->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		device_initialize(dev);
		lockdep_set_class(&dev->mutex, &nvdimm_namespace_key);
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (*err == 0)
			rc = -ENODEV;
	}

	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}