/* drivers/nvdimm/label.c */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"
/*
 * GUIDs used to map a label's abstraction_guid to/from an
 * nvdimm_claim_class (see to_nvdimm_cclass() / to_abstraction_guid()).
 * NOTE(review): initialized elsewhere — not visible in this chunk.
 */
static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

/* on-media signature that identifies a namespace index block */
static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
  27. static u32 best_seq(u32 a, u32 b)
  28. {
  29. a &= NSINDEX_SEQ_MASK;
  30. b &= NSINDEX_SEQ_MASK;
  31. if (a == 0 || a == b)
  32. return b;
  33. else if (b == 0)
  34. return a;
  35. else if (nd_inc_seq(a) == b)
  36. return b;
  37. else
  38. return a;
  39. }
/* size of one on-media label as discovered/set by nd_label_validate() */
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}
/*
 * An index block is the fixed header plus a free-slot bitmap (one bit
 * per label slot), rounded up to the index alignment.
 */
static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}
/* labels that fit in the config area after reserving two index blocks */
static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
		sizeof_namespace_label(ndd);
}
/*
 * nvdimm_num_label_slots - total label slots the config area can host
 * @ndd: dimm container backing the label storage area
 *
 * Size the index blocks for the worst case (as if every byte of the
 * area held labels), then report how many labels actually fit
 * alongside two index blocks of that size.
 */
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	/* upper bound on slot count ignoring index-block overhead */
	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	/* index size for that bound, in NSINDEX_ALIGN units */
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}
/*
 * sizeof_namespace_index - size of one index block for this dimm
 * @ndd: dimm container backing the label storage area
 *
 * Returns 0 (with an error logged) when the label area is too small to
 * host two index blocks plus at least two labels.
 */
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels. The minimum index
	 * block size is 256 bytes, and the minimum label size is 256 bytes.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}
/*
 * __nd_label_validate - find the current (authoritative) index block
 * @ndd: dimm container whose label area has been read in
 *
 * Returns 0 or 1 (the valid/current index block) or -1 when neither
 * block validates at the label size currently programmed into
 * @ndd->nslabel_size.
 */
static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media label format consists of two index blocks followed
	 * by an array of labels. None of these structures are ever
	 * updated in place. A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		/* fletcher64 is computed with the checksum field zeroed */
		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		/* a masked sequence number of zero marks an invalid block */
		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		/* nslot must leave room for two index blocks */
		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}
  206. int nd_label_validate(struct nvdimm_drvdata *ndd)
  207. {
  208. /*
  209. * In order to probe for and validate namespace index blocks we
  210. * need to know the size of the labels, and we can't trust the
  211. * size of the labels until we validate the index blocks.
  212. * Resolve this dependency loop by probing for known label
  213. * sizes, but default to v1.2 256-byte namespace labels if
  214. * discovery fails.
  215. */
  216. int label_size[] = { 128, 256 };
  217. int i, rc;
  218. for (i = 0; i < ARRAY_SIZE(label_size); i++) {
  219. ndd->nslabel_size = label_size[i];
  220. rc = __nd_label_validate(ndd);
  221. if (rc >= 0)
  222. return rc;
  223. }
  224. return -1;
  225. }
  226. void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
  227. struct nd_namespace_index *src)
  228. {
  229. if (dst && src)
  230. /* pass */;
  231. else
  232. return;
  233. memcpy(dst, src, sizeof_namespace_index(ndd));
  234. }
/* the label array lives immediately after the two index blocks */
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}
  240. static int to_slot(struct nvdimm_drvdata *ndd,
  241. struct nd_namespace_label *nd_label)
  242. {
  243. unsigned long label, base;
  244. label = (unsigned long) nd_label;
  245. base = (unsigned long) nd_label_base(ndd);
  246. return (label - base) / sizeof_namespace_label(ndd);
  247. }
  248. static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
  249. {
  250. unsigned long label, base;
  251. base = (unsigned long) nd_label_base(ndd);
  252. label = base + sizeof_namespace_label(ndd) * slot;
  253. return (struct nd_namespace_label *) label;
  254. }
/*
 * for_each_clear_bit_le - iterate clear bits of a little-endian bitmap
 *
 * The index free-bitmap sets a bit for each free slot, so iterating the
 * clear bits visits every in-use label slot.
 */
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0); \
	     (bit) < (size); \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
  259. /**
  260. * preamble_index - common variable initialization for nd_label_* routines
  261. * @ndd: dimm container for the relevant label set
  262. * @idx: namespace_index index
  263. * @nsindex_out: on return set to the currently active namespace index
  264. * @free: on return set to the free label bitmap in the index
  265. * @nslot: on return set to the number of slots in the label space
  266. */
  267. static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
  268. struct nd_namespace_index **nsindex_out,
  269. unsigned long **free, u32 *nslot)
  270. {
  271. struct nd_namespace_index *nsindex;
  272. nsindex = to_namespace_index(ndd, idx);
  273. if (nsindex == NULL)
  274. return false;
  275. *free = (unsigned long *) nsindex->free;
  276. *nslot = __le32_to_cpu(nsindex->nslot);
  277. *nsindex_out = nsindex;
  278. return true;
  279. }
  280. char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
  281. {
  282. if (!label_id || !uuid)
  283. return NULL;
  284. snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
  285. flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
  286. return label_id->id;
  287. }
/* preamble_index() against the current (active) index block */
static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}
/* preamble_index() against the staging (next-to-write) index block */
static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}
/*
 * slot_valid - check that an in-use slot holds a coherent label
 * @ndd: dimm container backing the label storage area
 * @nd_label: label to check
 * @slot: slot index the label was read from
 *
 * A valid label records its own slot number, describes a 4K-aligned
 * dpa/size pair, and (for label formats that carry one) has a correct
 * fletcher64 checksum.
 */
static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check that DPA allocations are page aligned */
	if ((__le64_to_cpu(nd_label->dpa)
				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		/* fletcher64 is computed with the checksum field zeroed */
		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
					slot, sum);
			return false;
		}
	}

	return true;
}
/*
 * nd_label_reserve_dpa - reserve the dpa ranges claimed by active labels
 * @ndd: dimm container with a validated current index
 *
 * Walk every valid in-use label and reserve its [dpa, dpa + rawsize)
 * range in the dimm's dpa resource tracking so later allocations do
 * not collide.  Returns 0 on success (or when no label area exists)
 * and -EBUSY when a reservation fails.
 */
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}
/*
 * nd_label_active_count - count valid labels in the current index
 * @ndd: dimm container with a validated current index
 *
 * In-use slots that fail validation are logged and skipped rather
 * than counted.
 */
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
					"slot%d invalid slot: %d dpa: %llx size: %llx\n",
					slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}
  380. struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
  381. {
  382. struct nd_namespace_index *nsindex;
  383. unsigned long *free;
  384. u32 nslot, slot;
  385. if (!preamble_current(ndd, &nsindex, &free, &nslot))
  386. return NULL;
  387. for_each_clear_bit_le(slot, free, nslot) {
  388. struct nd_namespace_label *nd_label;
  389. nd_label = to_label(ndd, slot);
  390. if (!slot_valid(ndd, nd_label, slot))
  391. continue;
  392. if (n-- == 0)
  393. return to_label(ndd, slot);
  394. }
  395. return NULL;
  396. }
  397. u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
  398. {
  399. struct nd_namespace_index *nsindex;
  400. unsigned long *free;
  401. u32 nslot, slot;
  402. if (!preamble_next(ndd, &nsindex, &free, &nslot))
  403. return UINT_MAX;
  404. WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
  405. slot = find_next_bit_le(free, nslot, 0);
  406. if (slot == nslot)
  407. return UINT_MAX;
  408. clear_bit_le(slot, free);
  409. return slot;
  410. }
  411. bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
  412. {
  413. struct nd_namespace_index *nsindex;
  414. unsigned long *free;
  415. u32 nslot;
  416. if (!preamble_next(ndd, &nsindex, &free, &nslot))
  417. return false;
  418. WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
  419. if (slot < nslot)
  420. return !test_and_set_bit_le(slot, free);
  421. return false;
  422. }
/*
 * nd_label_nfree - number of free slots in the staging (next) index
 * @ndd: dimm container backing the label storage area
 *
 * When no next index has been established yet, report the theoretical
 * slot capacity of the label area instead.
 */
u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}
/*
 * nd_label_write_index - format and persist one namespace index block
 * @ndd: dimm container backing the label storage area
 * @index: which index block (0 or 1) to write
 * @seq: sequence number to stamp on the block
 * @flags: ND_NSINDEX_INIT to (re-)initialize the free-slot bitmap
 *
 * Returns 0 on success or a negative error from the config-data write.
 * For a non-init write, the in-memory current/next bookkeeping rotates
 * afterwards so this freshly written block becomes the current index.
 */
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	/* NOTE(review): assumes 3 flag bytes precede labelsize — per layout */
	memset(&nsindex->flags, 0, 3);
	/* labelsize is stored as a shift: 128 -> 0, 256 -> 1 */
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	/* 128-byte labels imply the v1.1 format, 256-byte implies v1.2 */
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		/* mark all slots free, then clear the bitmap's pad bits */
		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	/* fletcher64 is computed with the checksum field zeroed */
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}
  492. static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
  493. struct nd_namespace_label *nd_label)
  494. {
  495. return (unsigned long) nd_label
  496. - (unsigned long) to_namespace_index(ndd, 0);
  497. }
  498. enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
  499. {
  500. if (guid_equal(guid, &nvdimm_btt_guid))
  501. return NVDIMM_CCLASS_BTT;
  502. else if (guid_equal(guid, &nvdimm_btt2_guid))
  503. return NVDIMM_CCLASS_BTT2;
  504. else if (guid_equal(guid, &nvdimm_pfn_guid))
  505. return NVDIMM_CCLASS_PFN;
  506. else if (guid_equal(guid, &nvdimm_dax_guid))
  507. return NVDIMM_CCLASS_DAX;
  508. else if (guid_equal(guid, &guid_null))
  509. return NVDIMM_CCLASS_NONE;
  510. return NVDIMM_CCLASS_UNKNOWN;
  511. }
  512. static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
  513. guid_t *target)
  514. {
  515. if (claim_class == NVDIMM_CCLASS_BTT)
  516. return &nvdimm_btt_guid;
  517. else if (claim_class == NVDIMM_CCLASS_BTT2)
  518. return &nvdimm_btt2_guid;
  519. else if (claim_class == NVDIMM_CCLASS_PFN)
  520. return &nvdimm_pfn_guid;
  521. else if (claim_class == NVDIMM_CCLASS_DAX)
  522. return &nvdimm_dax_guid;
  523. else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
  524. /*
  525. * If we're modifying a namespace for which we don't
  526. * know the claim_class, don't touch the existing guid.
  527. */
  528. return target;
  529. } else
  530. return &guid_null;
  531. }
/*
 * reap_victim - release the on-media slot backing a stale label entry
 * @nd_mapping: dimm mapping that tracks @victim
 * @victim: label entry whose slot is returned to the free pool
 *
 * Frees the slot in the staging index and drops the tracked label
 * pointer; the index itself is written out later by the caller.
 */
static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}
/*
 * __pmem_label_update - write an updated pmem namespace label on one dimm
 * @nd_region: region containing the interleave set
 * @nd_mapping: dimm mapping being updated
 * @nspm: pmem namespace whose settings are being persisted
 * @pos: this dimm's position within the interleave set
 * @flags: label flags to record
 *
 * Writes the new label to a free slot in the staging index, garbage
 * collects superseded labels for the same uuid, and then commits the
 * staging index so it becomes current.  Returns 0 on success or a
 * negative errno.
 */
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	/* find the dpa resource tracking this namespace's allocation */
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(flags);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		/* fletcher64 is computed with the checksum field zeroed */
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
				|| memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) == 0)
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		/* record the new label in the first empty tracking slot */
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}
  635. static bool is_old_resource(struct resource *res, struct resource **list, int n)
  636. {
  637. int i;
  638. if (res->flags & DPA_RESOURCE_ADJUSTED)
  639. return false;
  640. for (i = 0; i < n; i++)
  641. if (res == list[i])
  642. return true;
  643. return false;
  644. }
  645. static struct resource *to_resource(struct nvdimm_drvdata *ndd,
  646. struct nd_namespace_label *nd_label)
  647. {
  648. struct resource *res;
  649. for_each_dpa_resource(ndd, res) {
  650. if (res->start != __le64_to_cpu(nd_label->dpa))
  651. continue;
  652. if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
  653. continue;
  654. return res;
  655. }
  656. return NULL;
  657. }
  658. /*
  659. * 1/ Account all the labels that can be freed after this update
  660. * 2/ Allocate and write the label to the staging (next) index
  661. * 3/ Record the resources in the namespace device
  662. */
  663. static int __blk_label_update(struct nd_region *nd_region,
  664. struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
  665. int num_labels)
  666. {
  667. int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
  668. struct nd_interleave_set *nd_set = nd_region->nd_set;
  669. struct nd_namespace_common *ndns = &nsblk->common;
  670. struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
  671. struct nd_namespace_label *nd_label;
  672. struct nd_label_ent *label_ent, *e;
  673. struct nd_namespace_index *nsindex;
  674. unsigned long *free, *victim_map = NULL;
  675. struct resource *res, **old_res_list;
  676. struct nd_label_id label_id;
  677. u8 uuid[NSLABEL_UUID_LEN];
  678. int min_dpa_idx = 0;
  679. LIST_HEAD(list);
  680. u32 nslot, slot;
  681. if (!preamble_next(ndd, &nsindex, &free, &nslot))
  682. return -ENXIO;
  683. old_res_list = nsblk->res;
  684. nfree = nd_label_nfree(ndd);
  685. old_num_resources = nsblk->num_resources;
  686. nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
  687. /*
  688. * We need to loop over the old resources a few times, which seems a
  689. * bit inefficient, but we need to know that we have the label
  690. * space before we start mutating the tracking structures.
  691. * Otherwise the recovery method of last resort for userspace is
  692. * disable and re-enable the parent region.
  693. */
  694. alloc = 0;
  695. for_each_dpa_resource(ndd, res) {
  696. if (strcmp(res->name, label_id.id) != 0)
  697. continue;
  698. if (!is_old_resource(res, old_res_list, old_num_resources))
  699. alloc++;
  700. }
  701. victims = 0;
  702. if (old_num_resources) {
  703. /* convert old local-label-map to dimm-slot victim-map */
  704. victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long),
  705. GFP_KERNEL);
  706. if (!victim_map)
  707. return -ENOMEM;
  708. /* mark unused labels for garbage collection */
  709. for_each_clear_bit_le(slot, free, nslot) {
  710. nd_label = to_label(ndd, slot);
  711. memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
  712. if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
  713. continue;
  714. res = to_resource(ndd, nd_label);
  715. if (res && is_old_resource(res, old_res_list,
  716. old_num_resources))
  717. continue;
  718. slot = to_slot(ndd, nd_label);
  719. set_bit(slot, victim_map);
  720. victims++;
  721. }
  722. }
  723. /* don't allow updates that consume the last label */
  724. if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
  725. dev_info(&nsblk->common.dev, "insufficient label space\n");
  726. kfree(victim_map);
  727. return -ENOSPC;
  728. }
  729. /* from here on we need to abort on error */
  730. /* assign all resources to the namespace before writing the labels */
  731. nsblk->res = NULL;
  732. nsblk->num_resources = 0;
  733. for_each_dpa_resource(ndd, res) {
  734. if (strcmp(res->name, label_id.id) != 0)
  735. continue;
  736. if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
  737. rc = -ENOMEM;
  738. goto abort;
  739. }
  740. }
  741. /* release slots associated with any invalidated UUIDs */
  742. mutex_lock(&nd_mapping->lock);
  743. list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
  744. if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
  745. reap_victim(nd_mapping, label_ent);
  746. list_move(&label_ent->list, &list);
  747. }
  748. mutex_unlock(&nd_mapping->lock);
  749. /*
  750. * Find the resource associated with the first label in the set
  751. * per the v1.2 namespace specification.
  752. */
  753. for (i = 0; i < nsblk->num_resources; i++) {
  754. struct resource *min = nsblk->res[min_dpa_idx];
  755. res = nsblk->res[i];
  756. if (res->start < min->start)
  757. min_dpa_idx = i;
  758. }
  759. for (i = 0; i < nsblk->num_resources; i++) {
  760. size_t offset;
  761. res = nsblk->res[i];
  762. if (is_old_resource(res, old_res_list, old_num_resources))
  763. continue; /* carry-over */
  764. slot = nd_label_alloc_slot(ndd);
  765. if (slot == UINT_MAX) {
  766. rc = -ENXIO;
  767. goto abort;
  768. }
  769. dev_dbg(ndd->dev, "allocated: %d\n", slot);
  770. nd_label = to_label(ndd, slot);
  771. memset(nd_label, 0, sizeof_namespace_label(ndd));
  772. memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
  773. if (nsblk->alt_name)
  774. memcpy(nd_label->name, nsblk->alt_name,
  775. NSLABEL_NAME_LEN);
  776. nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
  777. /*
  778. * Use the presence of the type_guid as a flag to
  779. * determine isetcookie usage and nlabel + position
  780. * policy for blk-aperture namespaces.
  781. */
  782. if (namespace_label_has(ndd, type_guid)) {
  783. if (i == min_dpa_idx) {
  784. nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
  785. nd_label->position = __cpu_to_le16(0);
  786. } else {
  787. nd_label->nlabel = __cpu_to_le16(0xffff);
  788. nd_label->position = __cpu_to_le16(0xffff);
  789. }
  790. nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
  791. } else {
  792. nd_label->nlabel = __cpu_to_le16(0); /* N/A */
  793. nd_label->position = __cpu_to_le16(0); /* N/A */
  794. nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
  795. }
  796. nd_label->dpa = __cpu_to_le64(res->start);
  797. nd_label->rawsize = __cpu_to_le64(resource_size(res));
  798. nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
  799. nd_label->slot = __cpu_to_le32(slot);
  800. if (namespace_label_has(ndd, type_guid))
  801. guid_copy(&nd_label->type_guid, &nd_set->type_guid);
  802. if (namespace_label_has(ndd, abstraction_guid))
  803. guid_copy(&nd_label->abstraction_guid,
  804. to_abstraction_guid(ndns->claim_class,
  805. &nd_label->abstraction_guid));
  806. if (namespace_label_has(ndd, checksum)) {
  807. u64 sum;
  808. nd_label->checksum = __cpu_to_le64(0);
  809. sum = nd_fletcher64(nd_label,
  810. sizeof_namespace_label(ndd), 1);
  811. nd_label->checksum = __cpu_to_le64(sum);
  812. }
  813. /* update label */
  814. offset = nd_label_offset(ndd, nd_label);
  815. rc = nvdimm_set_config_data(ndd, offset, nd_label,
  816. sizeof_namespace_label(ndd));
  817. if (rc < 0)
  818. goto abort;
  819. }
  820. /* free up now unused slots in the new index */
  821. for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
  822. dev_dbg(ndd->dev, "free: %d\n", slot);
  823. nd_label_free_slot(ndd, slot);
  824. }
  825. /* update index */
  826. rc = nd_label_write_index(ndd, ndd->ns_next,
  827. nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
  828. if (rc)
  829. goto abort;
  830. /*
  831. * Now that the on-dimm labels are up to date, fix up the tracking
  832. * entries in nd_mapping->labels
  833. */
  834. nlabel = 0;
  835. mutex_lock(&nd_mapping->lock);
  836. list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
  837. nd_label = label_ent->label;
  838. if (!nd_label)
  839. continue;
  840. nlabel++;
  841. memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
  842. if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
  843. continue;
  844. nlabel--;
  845. list_move(&label_ent->list, &list);
  846. label_ent->label = NULL;
  847. }
  848. list_splice_tail_init(&list, &nd_mapping->labels);
  849. mutex_unlock(&nd_mapping->lock);
  850. if (nlabel + nsblk->num_resources > num_labels) {
  851. /*
  852. * Bug, we can't end up with more resources than
  853. * available labels
  854. */
  855. WARN_ON_ONCE(1);
  856. rc = -ENXIO;
  857. goto out;
  858. }
  859. mutex_lock(&nd_mapping->lock);
  860. label_ent = list_first_entry_or_null(&nd_mapping->labels,
  861. typeof(*label_ent), list);
  862. if (!label_ent) {
  863. WARN_ON(1);
  864. mutex_unlock(&nd_mapping->lock);
  865. rc = -ENXIO;
  866. goto out;
  867. }
  868. for_each_clear_bit_le(slot, free, nslot) {
  869. nd_label = to_label(ndd, slot);
  870. memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
  871. if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
  872. continue;
  873. res = to_resource(ndd, nd_label);
  874. res->flags &= ~DPA_RESOURCE_ADJUSTED;
  875. dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
  876. list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
  877. if (label_ent->label)
  878. continue;
  879. label_ent->label = nd_label;
  880. nd_label = NULL;
  881. break;
  882. }
  883. if (nd_label)
  884. dev_WARN(&nsblk->common.dev,
  885. "failed to track label slot%d\n", slot);
  886. }
  887. mutex_unlock(&nd_mapping->lock);
  888. out:
  889. kfree(old_res_list);
  890. kfree(victim_map);
  891. return rc;
  892. abort:
  893. /*
  894. * 1/ repair the allocated label bitmap in the index
  895. * 2/ restore the resource list
  896. */
  897. nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
  898. kfree(nsblk->res);
  899. nsblk->res = old_res_list;
  900. nsblk->num_resources = old_num_resources;
  901. old_res_list = NULL;
  902. goto out;
  903. }
  904. static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
  905. {
  906. int i, old_num_labels = 0;
  907. struct nd_label_ent *label_ent;
  908. struct nd_namespace_index *nsindex;
  909. struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
  910. mutex_lock(&nd_mapping->lock);
  911. list_for_each_entry(label_ent, &nd_mapping->labels, list)
  912. old_num_labels++;
  913. mutex_unlock(&nd_mapping->lock);
  914. /*
  915. * We need to preserve all the old labels for the mapping so
  916. * they can be garbage collected after writing the new labels.
  917. */
  918. for (i = old_num_labels; i < num_labels; i++) {
  919. label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
  920. if (!label_ent)
  921. return -ENOMEM;
  922. mutex_lock(&nd_mapping->lock);
  923. list_add_tail(&label_ent->list, &nd_mapping->labels);
  924. mutex_unlock(&nd_mapping->lock);
  925. }
  926. if (ndd->ns_current == -1 || ndd->ns_next == -1)
  927. /* pass */;
  928. else
  929. return max(num_labels, old_num_labels);
  930. nsindex = to_namespace_index(ndd, 0);
  931. memset(nsindex, 0, ndd->nsarea.config_size);
  932. for (i = 0; i < 2; i++) {
  933. int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
  934. if (rc)
  935. return rc;
  936. }
  937. ndd->ns_next = 1;
  938. ndd->ns_current = 0;
  939. return max(num_labels, old_num_labels);
  940. }
  941. static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
  942. {
  943. struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
  944. struct nd_label_ent *label_ent, *e;
  945. struct nd_namespace_index *nsindex;
  946. u8 label_uuid[NSLABEL_UUID_LEN];
  947. unsigned long *free;
  948. LIST_HEAD(list);
  949. u32 nslot, slot;
  950. int active = 0;
  951. if (!uuid)
  952. return 0;
  953. /* no index || no labels == nothing to delete */
  954. if (!preamble_next(ndd, &nsindex, &free, &nslot))
  955. return 0;
  956. mutex_lock(&nd_mapping->lock);
  957. list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
  958. struct nd_namespace_label *nd_label = label_ent->label;
  959. if (!nd_label)
  960. continue;
  961. active++;
  962. memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
  963. if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
  964. continue;
  965. active--;
  966. slot = to_slot(ndd, nd_label);
  967. nd_label_free_slot(ndd, slot);
  968. dev_dbg(ndd->dev, "free: %d\n", slot);
  969. list_move_tail(&label_ent->list, &list);
  970. label_ent->label = NULL;
  971. }
  972. list_splice_tail_init(&list, &nd_mapping->labels);
  973. if (active == 0) {
  974. nd_mapping_free_labels(nd_mapping);
  975. dev_dbg(ndd->dev, "no more active labels\n");
  976. }
  977. mutex_unlock(&nd_mapping->lock);
  978. return nd_label_write_index(ndd, ndd->ns_next,
  979. nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
  980. }
  981. int nd_pmem_namespace_label_update(struct nd_region *nd_region,
  982. struct nd_namespace_pmem *nspm, resource_size_t size)
  983. {
  984. int i, rc;
  985. for (i = 0; i < nd_region->ndr_mappings; i++) {
  986. struct nd_mapping *nd_mapping = &nd_region->mapping[i];
  987. struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
  988. struct resource *res;
  989. int count = 0;
  990. if (size == 0) {
  991. rc = del_labels(nd_mapping, nspm->uuid);
  992. if (rc)
  993. return rc;
  994. continue;
  995. }
  996. for_each_dpa_resource(ndd, res)
  997. if (strncmp(res->name, "pmem", 4) == 0)
  998. count++;
  999. WARN_ON_ONCE(!count);
  1000. rc = init_labels(nd_mapping, count);
  1001. if (rc < 0)
  1002. return rc;
  1003. rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
  1004. NSLABEL_FLAG_UPDATING);
  1005. if (rc)
  1006. return rc;
  1007. }
  1008. if (size == 0)
  1009. return 0;
  1010. /* Clear the UPDATING flag per UEFI 2.7 expectations */
  1011. for (i = 0; i < nd_region->ndr_mappings; i++) {
  1012. struct nd_mapping *nd_mapping = &nd_region->mapping[i];
  1013. rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
  1014. if (rc)
  1015. return rc;
  1016. }
  1017. return 0;
  1018. }
  1019. int nd_blk_namespace_label_update(struct nd_region *nd_region,
  1020. struct nd_namespace_blk *nsblk, resource_size_t size)
  1021. {
  1022. struct nd_mapping *nd_mapping = &nd_region->mapping[0];
  1023. struct resource *res;
  1024. int count = 0;
  1025. if (size == 0)
  1026. return del_labels(nd_mapping, nsblk->uuid);
  1027. for_each_dpa_resource(to_ndd(nd_mapping), res)
  1028. count++;
  1029. count = init_labels(nd_mapping, count);
  1030. if (count < 0)
  1031. return count;
  1032. return __blk_label_update(nd_region, nd_mapping, nsblk, count);
  1033. }
  1034. int __init nd_label_init(void)
  1035. {
  1036. WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
  1037. WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
  1038. WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
  1039. WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));
  1040. return 0;
  1041. }